#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from scipy.special import expit, erf

from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()


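# The tests below follow a common pattern: each activation op gets an OpTest
# subclass (numerical forward/backward checks), usually a *_ZeroDim variant
# that reruns it with a 0-d input (shape []), and an *API TestCase that
# exercises the paddle.nn / paddle.nn.functional wrappers in static and
# dynamic graph mode. A single family can be run on its own, e.g.
#     python -m unittest test_activation_op.TestSigmoid -v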
class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16"
            )
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):
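    # Base class for the op tests in this file. Subclasses typically override
    # init_dtype / init_shape / init_kernel_type and rebuild self.op_type,
    # self.inputs and self.outputs in their own setUp.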
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass


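# The *_ZeroDim variants rerun the same checks with a 0-d tensor (shape [])
# as input.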
class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input dtype must be float16, float32, float64.
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)


class TestParameter(object):
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


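# Note: the bfloat16 test below carries its data as uint16 bit patterns;
# convert_float_to_uint16 (from op_test) packs the float32 reference values.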
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.silu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out = paddle.fluid.layers.logsigmoid(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12], self.dtype)
            out = fluid.layers.tanh(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tanh(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, self.tanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )

            pd_sinh_out = fluid.layers.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_sinh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_sinh_out],
            )

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_cosh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_cosh_out],
            )

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.cosh(x_fp16)


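# Reference implementation: tanhshrink(x) = x - tanh(x).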
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.tanh_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_tanhshrink(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.hard_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardshrink(x_fp16)


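# Reference implementation: hardtanh clips x into [min, max].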
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardtanh(x_fp16)


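# Reference implementation: softshrink maps values inside
# [-threshold, threshold] to zero and shifts the rest toward zero by threshold.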
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softshrink(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[12, 10], dtype='float32'
            )
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
        )


'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [4, 25]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason as TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The gradient of floor, ceil and round is undefined; the op returns
    # zero as the gradient, while numpy would return nan.
    def test_check_grad(self):
        pass


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [11, 17], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
1543
        self.init_shape()
X
xiaoting 已提交
1544 1545

        np.random.seed(400)
1546
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
X
xiaoting 已提交
1547 1548 1549 1550 1551
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1552 1553 1554
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1555 1556 1557 1558 1559 1560
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1561 1562 1563 1564 1565
class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason as in TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason as in TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.relu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out
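

# A minimal illustrative sketch (ours, not used by the tests above): it spells
# out the piecewise rule that ref_leaky_relu implements, namely
# leaky_relu(x) = x for x >= 0 and alpha * x otherwise.
def _leaky_relu_reference_example(alpha=0.01):
    # With the default alpha=0.01, [-2.0, 0.0, 3.0] maps to [-0.02, 0.0, 3.0]:
    # negative entries are scaled by alpha, the rest pass through unchanged.
    x = np.array([-2.0, 0.0, 3.0])
    expected = np.array([-0.02, 0.0, 3.0])
    np.testing.assert_allclose(ref_leaky_relu(x, alpha), expected, rtol=1e-6)
    return expected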


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    # fluid.layers.leaky_relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.leaky_relu(x, 0.01)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_leaky_relu(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
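

# Illustrative sketch (ours, not used by the tests): the exact branch above is
# 0.5 * x * (1 + erf(x / sqrt(2))), while the tanh branch is the commonly used
# approximation, so the two agree closely but not bit-for-bit.
def _gelu_branches_example():
    x = np.array([-1.0, 0.0, 1.0], dtype=np.float64)
    exact = gelu(x, False)
    approx = gelu(x, True)
    # gelu(1.0) is roughly 0.8413; the approximation differs by well under 1e-3.
    np.testing.assert_allclose(exact, approx, atol=1e-3)
    return exact, approx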


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.gelu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBreluAPI(unittest.TestCase):
    # test paddle.fluid.layers.brelu
    def setUp(self):
        np.random.seed(1024)
        self.t_min = 0.0
        self.t_max = 24.0
        self.x_np = np.random.uniform(-1, 30, [10, 12]).astype('float32')
        self.out_ref = np.copy(self.x_np)
        self.out_ref[self.out_ref < self.t_min] = self.t_min
        self.out_ref[self.out_ref > self.t_max] = self.t_max
        self.out_ref = self.out_ref.astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_fluid_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12])
            out = paddle.fluid.layers.brelu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            np.testing.assert_allclose(self.out_ref, res[0], rtol=1e-05)

            paddle.disable_static(self.place)
            x = paddle.to_tensor(self.x_np)
            out = paddle.fluid.layers.brelu(x)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            fluid.layers.brelu(x_fp16)


def ref_relu6(x, threshold=6.0):
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
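

# Minimal sketch (ours, not used by the tests): relu6 simply clamps the input
# into [0, threshold], i.e. min(max(x, 0), 6) with the default threshold.
def _relu6_clamp_example():
    x = np.array([-1.0, 3.0, 8.0])
    expected = np.array([0.0, 3.0, 6.0])
    np.testing.assert_allclose(ref_relu6(x), expected)
    return expected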


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.relu6, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.relu6(x_fp16)


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
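

# Illustrative sketch (ours, not used by the tests): hardswish evaluates
# x * min(max(x + offset, 0), threshold) / scale, which is 0 for x <= -3 and
# equals x once x >= 3 with the default parameters.
def _hardswish_pointwise_example():
    x = np.array([-4.0, 0.0, 1.0, 3.0])
    # Expected: [-4 * 0 / 6, 0 * 3 / 6, 1 * 4 / 6, 3 * 6 / 6]
    expected = np.array([0.0, 0.0, 4.0 / 6.0, 3.0])
    np.testing.assert_allclose(ref_hardswish(x), expected, rtol=1e-6)
    return expected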


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_swish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardswish(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.soft_relu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.soft_relu(x_fp16)


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
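

# Illustrative sketch (ours, not used by the tests): ELU keeps positive inputs
# unchanged and maps negative inputs to alpha * (exp(x) - 1), so elu(-1, 1.0)
# is exp(-1) - 1, roughly -0.632.
def _elu_pointwise_example():
    x = np.array([-1.0, 0.0, 2.0])
    expected = np.array([np.exp(-1.0) - 1.0, 0.0, 2.0])
    np.testing.assert_allclose(elu(x, 1.0), expected, rtol=1e-6)
    return expected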


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 of the standard ELU (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.elu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
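

# Illustrative sketch (ours, not used by the tests): CELU generalizes ELU by
# dividing the exponent by alpha, i.e.
# celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)),
# which keeps the function continuously differentiable for any alpha > 0.
def _celu_pointwise_example(alpha=1.5):
    x = np.array([-1.0, 0.0, 2.0])
    expected = np.array([alpha * (np.exp(-1.0 / alpha) - 1.0), 0.0, 2.0])
    np.testing.assert_allclose(celu(x, alpha), expected, rtol=1e-6)
    return expected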


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must not be 0
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[10, 12], dtype='float32'
            )
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.celu(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
        )
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
        )

        self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log10(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.layers.data(
                name="data_x",
                shape=[11, 17],
                append_batch_size=False,
                dtype="float64",
            )

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (res1,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.007, check_eager=True
        )

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
        )


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32"),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32"
        )
        res = fluid.layers.data(
            name="res", shape=[11, 17], append_batch_size=False, dtype="float32"
        )

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6],
        )

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))

    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
        )
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
        )
        in3 = fluid.layers.data(
            name="in3", shape=[11, 17], append_batch_size=False, dtype="float32"
        )
        in4 = fluid.layers.data(
            name="in4", shape=[11, 17], append_batch_size=False, dtype="float64"
        )

        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)

        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out
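

# Illustrative sketch (ours, not used by the tests): stanh is a scaled tanh,
# scale_b * tanh(scale_a * x), so it is 0 at 0 and saturates at +/- scale_b
# (about +/- 1.7159 with the default scales) for large |x|.
def _stanh_pointwise_example():
    x = np.array([0.0, 1.0])
    expected = 1.7159 * np.tanh(0.67 * x)
    np.testing.assert_allclose(ref_stanh(x), expected, rtol=1e-6)
    return expected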


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out
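

# Illustrative sketch (ours, not used by the tests): softplus evaluates
# log(1 + exp(beta * x)) / beta and switches to the identity once beta * x
# exceeds the threshold, where the two are numerically indistinguishable.
def _softplus_pointwise_example(beta=1.0, threshold=20.0):
    x = np.array([0.0, 1.0, 100.0])
    # softplus(0) = log(2); the last entry takes the x_beta > threshold branch.
    expected = np.array([np.log(2.0), np.log(1.0 + np.e), 100.0])
    np.testing.assert_allclose(
        ref_softplus(x, beta, threshold), expected, rtol=1e-6
    )
    return expected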


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


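# bfloat16 tensors are passed to the op as uint16 bit patterns via
# convert_float_to_uint16; the looser numeric_grad_delta below accounts for
# bf16's reduced mantissa precision.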
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softplus(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softplus(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softplus, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softplus(x_fp16)


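# Reference softsign: x / (1 + |x|).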
def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softsign(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softsign(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softsign, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softsign(x_fp16)


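# Reference thresholded ReLU: x if x > threshold else 0.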
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.thresholded_relu(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.thresholded_relu(x_fp16)


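# Reference hard sigmoid: clip(slope * x + offset, 0, 1).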
def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs: keep x away from the piecewise kink points
        # so the numeric gradient check stays stable.
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_sigmoid(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_sigmoid(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardsigmoid(x_fp16)


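# Reference swish: x * sigmoid(x) (beta fixed at 1).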
def ref_swish(x):
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def func_test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_dygraph_api(self):
        with _test_eager_guard():
            self.func_test_dygraph_api()
        self.func_test_dygraph_api()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.swish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.swish(x_fp16)


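# Reference mish: x * tanh(softplus(x)), with softplus linearized above
# `threshold` for numerical stability.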
def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.fluid.layers.nn.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.mish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.mish(x_fp16)


# ------------------ Test Error Activation ----------------------
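# create_test_error_class registers, for each op name, a TestCase that feeds
# int32/int64 inputs and expects a TypeError.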
def create_test_error_class(op_type):
    class TestOpErrors(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                op = getattr(fluid.layers, op_type)
                # The input dtype of op_type must be float32, float64.
                in1 = fluid.layers.data(
                    name='input2', shape=[12, 10], dtype="int32"
                )
                in2 = fluid.layers.data(
                    name='input3', shape=[12, 10], dtype="int64"
                )
                self.assertRaises(TypeError, op, in1)
                self.assertRaises(TypeError, op, in2)

    cls_name = "{0}_{1}".format(op_type, "test_errors")
    TestOpErrors.__name__ = cls_name
    globals()[cls_name] = TestOpErrors


create_test_error_class('acos')
create_test_error_class('asin')
create_test_error_class('atan')
create_test_error_class('ceil')
create_test_error_class('cos')
create_test_error_class('floor')
create_test_error_class('reciprocal')
create_test_error_class('round')
create_test_error_class('rsqrt')
create_test_error_class('sin')
create_test_error_class('sqrt')
create_test_error_class('tanh')
create_test_error_class('tan')
create_test_error_class('acosh')
create_test_error_class('asinh')
create_test_error_class('atanh')


# ------------------ Test Cudnn Activation ----------------------
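# create_test_act_cudnn_class registers a cuDNN variant of an existing
# activation test by setting use_cudnn=True; it is skipped when CUDA is
# unavailable.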
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
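# create_test_act_fp16_class registers a float16 variant of an activation
# test; output and gradient checks run only on CUDA devices that report
# float16 support.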
def create_test_act_fp16_class(
    parent, atol=1e-3, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol
                )

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


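# create_test_act_bf16_class mirrors the fp16 factory for bfloat16 inputs
# (stored as uint16 bit patterns); it is only generated for CUDA builds.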
def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActBF16(parent):
        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)


if __name__ == "__main__":
    unittest.main()