#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

import numpy as np
from op_test import OpTest, convert_float_to_uint16
from scipy.special import erf, expit

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import Program, program_guard
from paddle.fluid.layer_helper import LayerHelper

paddle.enable_static()

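
# The activation tests below share one structure: each op gets an OpTest
# subclass whose setUp() draws a random input, computes a NumPy reference
# output, and registers both as self.inputs / self.outputs; test_check_output
# then compares the Paddle kernel against that reference and test_check_grad
# checks the gradient numerically. *API classes exercise the static-graph and
# dygraph Python entry points, and *OpError classes cover dtype validation.
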
class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, paddle.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.sqrt, in2)

            in3 = paddle.static.data(
                name='input3', shape=[-1, 12, 10], dtype="float16"
            )
            paddle.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass


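# Subclasses tune the test through the init_dtype / init_shape /
# init_kernel_type hooks; the *_ZeroDim variants rerun the same checks with
# shape [] to cover 0-D (scalar) tensors.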
class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


class TestExpPrimFp32(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.skip_cinn()
        self.set_only_prim()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

    def skip_cinn(self):
        self.enable_cinn = True

    def set_only_prim(self):
        pass


class TestExpPrimFp64(TestExpPrimFp32):
    def init_dtype(self):
        self.dtype = np.float64


class TestExpPrimFp16(TestExpPrimFp32):
    def init_dtype(self):
        self.dtype = np.float16

    def set_only_prim(self):
        self.only_prim = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def skip_cinn(self):
        self.enable_cinn = True


class TestExpPrim_ZeroDim(TestExpPrimFp32):
    def init_shape(self):
        self.shape = []

    def skip_cinn(self):
        self.enable_cinn = False


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                # fetch the result explicitly; without a fetch_list the loop
                # below would have nothing to check
                res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


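# TestParameter is a mix-in: it resolves the op under test from self.op_type
# and calls the matching paddle.<op> / np.<op> functions by name via eval().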
class TestParameter:
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            if paddle.fluid.framework.in_dygraph_mode():
                paddle.enable_static()
            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.enable_cinn = False
        self.python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


class TestSigmoidFP16(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.enable_cinn = False
        self.only_prim = True
        self.python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager, check_prim=True)


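# bfloat16 tests feed uint16 data produced by convert_float_to_uint16 and
# only run on CUDA builds.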
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.enable_cinn = False
        self.python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        # elementwise_pow can not support bfloat16, skip check_prim = True.
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.prim_op_type = "comp"
        self.enable_cinn = True
        self.python_api = paddle.nn.functional.silu
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []
        self.enable_cinn = False


class TestSiluFP16(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.prim_op_type = "comp"
        self.enable_cinn = True
        self.only_prim = True
        self.python_api = paddle.nn.functional.silu
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager, check_prim=True)


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.silu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, self.tanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = paddle.static.data(
                name="data_x",
                shape=test_data_shape,
                dtype="float32",
            )

            pd_sinh_out = paddle.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_sinh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_sinh_out],
            )

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = paddle.static.data(
                name="data_x",
                shape=test_data_shape,
                dtype="float32",
            )

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_cosh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_cosh_out],
            )

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.cosh(x_fp16)


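# Reference implementation: tanhshrink(x) = x - tanh(x).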
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.tanhshrink(x_fp16)


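# Reference implementation: hardshrink keeps x where |x| > threshold and
# zeroes everything inside [-threshold, threshold].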
def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardshrink(x_fp16)


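# Reference implementation: hardtanh clips the input to [min, max].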
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardtanh(x_fp16)


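# Reference implementation: softshrink shifts x toward zero by threshold and
# zeroes the band [-threshold, threshold].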
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[12, 10], dtype='float32'
            )
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = False

    # TODO(wanghao107) add prim test
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = True

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_eager=True)

    def init_dtype(self):
        self.dtype = np.float32


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []
        self.enable_cinn = False


class TestSqrtPrim_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []
        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
        # TODO(wanghao107): add prim test
        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
        )


'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [4, 25]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # the gradient on floor, ceil, round is undefined.
    # we return zero as gradient, but the numpy return nan
    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [11, 17], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()
        self.init_shape()
        # prim not support now
        self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


1597 1598 1599 1600
class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


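# NumPy reference used by the leaky_relu tests below: negative entries are scaled by alpha.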
def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.leaky_relu(x_fp16)


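# NumPy reference used by the gelu tests below; approximate=True uses the tanh-based
# approximation, otherwise the exact erf formulation.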
def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


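# NumPy reference used by the relu6 tests below: clip the input to the range [0, threshold].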
def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.relu6(x_fp16)


class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
        with warnings.catch_warnings(record=True) as context:
            warnings.simplefilter("always")

            paddle.enable_static()
            helper = LayerHelper("relu6")
            data = paddle.static.data(
                name='data', shape=[None, 3, 32, 32], dtype='float32'
            )
            out = helper.create_variable_for_type_inference(dtype=data.dtype)
            os.environ['FLAGS_print_extra_attrs'] = "1"
            helper.append_op(
                type="relu6",
                inputs={'X': data},
                outputs={'Out': out},
                attrs={'threshold': 6.0},
            )
            self.assertTrue(
                "op relu6 use extra_attr: threshold" in str(context[-1].message)
            )
            os.environ['FLAGS_print_extra_attrs'] = "0"


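# NumPy reference used by the hard_swish tests below: x * min(max(x + offset, 0), threshold) / scale.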
def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}
        self.enable_cinn = False

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_eager=True, check_prim=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


class TestHardSwishFP16(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.only_prim = True
        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.float16


class TestHardSwish_ZeroDim_FP16(TestHardSwishFP16):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardswish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardswish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


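# NumPy reference used by the ELU tests below: x for positive inputs, alpha * (exp(x) - 1) otherwise.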
def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.elu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


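# NumPy reference used by the CELU tests below: max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).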
def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must be not equal 0
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[10, 12], dtype='float32'
            )
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.celu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log, in1)
        self.assertRaises(TypeError, paddle.log, in2)


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


    def setUp(self):
        self.op_type = "log10"
2717 2718
        self.check_eager = True
        self.python_api = paddle.log10
J
joejiong 已提交
2719
        self.init_dtype()
2720
        self.init_shape()
J
joejiong 已提交
2721

2722
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2723 2724 2725 2726 2727 2728 2729 2730
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2731
        self.check_grad(['X'], 'Out', check_eager=True)
J
joejiong 已提交
2732

2733 2734 2735 2736 2737 2738 2739

class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
J
joejiong 已提交
2740 2741 2742 2743 2744 2745 2746 2747
    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
2748 2749 2750
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
J
joejiong 已提交
2751
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
2752 2753 2754
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )
J
joejiong 已提交
2755 2756 2757 2758

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
2759 2760 2761 2762 2763
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
J
joejiong 已提交
2764
        expected_res = np.log10(input_x)
2765
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2766 2767 2768 2769 2770 2771 2772 2773

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
2774
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2775 2776


2777 2778 2779
class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x",
                shape=[11, 17],
                dtype="float64",
            )

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (res1,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.007, check_eager=True
        )

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


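# bfloat16 variant of the square test; inputs and expected outputs are packed with
# convert_float_to_uint16 and it runs only when compiled with CUDA.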
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
        )


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32"),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
        res = paddle.static.data(name="res", shape=[11, 17], dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = paddle.pow(x, factor_1)
        out_2 = paddle.pow(x, factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6],
        )

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))


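# NumPy reference used by the stanh tests below: scale_b * tanh(scale_a * x).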
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


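# NumPy reference used by the softplus tests below; inputs with beta * x above the
# threshold fall back to the identity branch.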
def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
K
kexinzhao 已提交
3114 3115
    def setUp(self):
        self.op_type = "softplus"
W
Wang Bojun 已提交
3116
        self.python_api = paddle.nn.functional.softplus
3117
        self.init_dtype()
3118
        self.init_shape()
3119

3120 3121
        beta = 2
        threshold = 15
3122

3123
        np.random.seed(1024)
3124
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3125 3126 3127
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
3128
        self.outputs = {'Out': out}
K
kexinzhao 已提交
3129

W
Wang Bojun 已提交
3130 3131
        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.05
        )


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softplus, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softplus(x_fp16)


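# Reference softsign: x / (1 + |x|).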
def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softsign, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softsign(x_fp16)


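# Reference thresholded ReLU: x where x > threshold, 0 elsewhere.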
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.thresholded_relu(x_fp16)


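# Reference hard sigmoid: clip(slope * x + offset, 0, 1).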
def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardsigmoid(x_fp16)


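# Reference swish: x * sigmoid(x); the op is exercised with beta fixed at 1.0.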
def ref_swish(x):
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.swish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.swish(x_fp16)


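# Reference mish: x * tanh(softplus(x)), where softplus falls back to x above
# the threshold for numerical stability.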
def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.mish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.mish(x_fp16)


# ------------------ Test Cudnn Activation----------------------
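# Dynamically derives a cuDNN variant of an activation test by setting
# use_cudnn=True and registering the new class in globals().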
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
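# Derives a float16 variant of an activation test; checks run only on CUDA
# devices that support FP16, and gradient checking can be disabled per op.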
def create_test_act_fp16_class(
    parent, atol=1e-3, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol
                )

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSigmoidFP16)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestSiluFP16)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


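# Derives a bfloat16 (uint16 storage) variant of an activation test; the whole
# class is skipped when CUDA is not available.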
def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActBF16(parent):
        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)
create_test_act_bf16_class(TestAbs)

if __name__ == "__main__":
    unittest.main()