#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

import numpy as np
from op_test import OpTest, convert_float_to_uint16
from scipy.special import erf, expit

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
import paddle.static as static
from paddle.fluid import Program, program_guard
from paddle.fluid.layer_helper import LayerHelper

paddle.enable_static()


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, paddle.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.sqrt, in2)

            in3 = paddle.static.data(
                name='input3', shape=[-1, 12, 10], dtype="float16"
            )
            paddle.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass
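
# Pattern note: every operator test below follows the same recipe -- subclass
# TestActivation (or OpTest directly), override `op_type`, `python_api`,
# `init_dtype` and `init_shape`, build the expected output with NumPy in
# `setUp`, and let `check_output`/`check_grad` do the comparison. The
# *_ZeroDim variants only override `init_shape` to return [] so the same
# kernels are also exercised with 0-D tensors.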


class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


class TestExpFp32_Prim(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.if_skip_cinn()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

    def if_skip_cinn(self):
        self.enable_cinn = True
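
# The *_Prim variants exercise the composite/primitive-operator path:
# `prim_op_type` selects how the op is decomposed, `check_prim=True` also
# checks gradients against that decomposition, and `if_skip_cinn` toggles
# `enable_cinn`, presumably disabling the CINN backend for cases (such as
# 0-D tensors) it does not yet handle. This is a reading of the flags as they
# are used in this file, not an authoritative description of the test harness.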


class TestExpFp64_Prim(TestExpFp32_Prim):
    def init_dtype(self):
        self.dtype = np.float64


class TestExpFp16_Prim(TestExpFp32_Prim):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True, only_check_prim=True)

    def if_skip_cinn(self):
        self.enable_cinn = True


class TestExpPrim_ZeroDim(TestExpFp32_Prim):
    def init_shape(self):
        self.shape = []

    def if_skip_cinn(self):
        self.enable_cinn = False


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter:
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            if paddle.fluid.framework.in_dygraph_mode():
                paddle.enable_static()
            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
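
# TestParameter is mixed into individual op tests (e.g. TestTanh, TestAtan);
# it dispatches through eval() on self.op_type, so the mixin only applies
# where `paddle.<op_type>` and `np.<op_type>` exist under the same name.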


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.enable_cinn = False
        self.python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.enable_cinn = False
        self.python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        # elementwise_pow doesn't support bfloat16, skip check_prim here.
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')
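
# Note on the BF16 tests: bfloat16 data is carried in uint16 ndarrays, so the
# float32 reference input and output are run through convert_float_to_uint16
# before being handed to OpTest (a reading of how the helper is used here,
# not a description of op_test internals).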


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.prim_op_type = "comp"
        self.enable_cinn = True
        self.python_api = paddle.nn.functional.silu
        self.init_dtype()
        self.init_shape()
        self.if_skip_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def if_skip_cinn(self):
        pass

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []

    def if_skip_cinn(self):
        self.enable_cinn = False


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.silu, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, self.tanh, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = paddle.static.data(
                name="data_x",
                shape=test_data_shape,
                dtype="float32",
            )

            pd_sinh_out = paddle.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_sinh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_sinh_out],
            )

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.sinh, x_int32)
            # float16 input is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = paddle.static.data(
                name="data_x",
                shape=test_data_shape,
                dtype="float32",
            )

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_cosh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_cosh_out],
            )

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.cosh, x_int32)
            # float16 input is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out
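
# Illustrative values (not used by the tests): tanhshrink(x) = x - tanh(x),
# so ref_tanhshrink(np.array([0.0, 1.0, 20.0])) is roughly
# [0.0, 0.2384, 19.0], since tanh saturates to 1 for large inputs.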


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out
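
# Illustrative values (not used by the tests): with threshold=0.5,
# ref_hardshrink(np.array([-1.0, -0.3, 0.4, 2.0]), 0.5) gives
# [-1.0, 0.0, 0.0, 2.0]; entries inside [-threshold, threshold] are zeroed.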


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out
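
# Illustrative values (not used by the tests): ref_hardtanh clips x to
# [min, max], so ref_hardtanh(np.array([-3.0, 0.2, 3.0])) gives
# [-1.0, 0.2, 1.0] with the default range.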


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out
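
# Illustrative values (not used by the tests): with the default threshold=0.5,
# ref_softshrink(np.array([-1.0, 0.3, 2.0])) gives [-0.5, 0.0, 1.5]; values
# inside [-threshold, threshold] map to zero, the rest shrink toward zero by
# threshold.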


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[12, 10], dtype='float32'
            )
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = False

    # TODO(wanghao107) add prim test
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = True

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_eager=True)

    def init_dtype(self):
        self.dtype = np.float32


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []
        self.enable_cinn = False


class TestSqrtPrim_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []
        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
        # TODO(wanghao107): add prim test
        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
        )


'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.prim_op_type = "prim"
        self.python_api = paddle.abs
        self.enable_cinn = False
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [4, 25]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason as TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.prim_op_type = "prim"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The gradient of floor, ceil and round is undefined.
    # We return zero as the gradient, but numpy returns nan.
    # The same reason as TestFloor.
    def test_check_grad(self):
        pass


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestFloor_Prim(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.prim_op_type = "prim"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        if len(self.shape) == 0:
            # for 0-D tensor, skip cinn testing
            self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        # The gradient of floor, ceil and round is undefined.
        # We return zero as the gradient, but numpy returns nan.
        # For prim we compare the result with the eager python api,
        # so we use the only_check_prim flag to indicate that only prim is tested.
        self.check_grad(['X'], 'Out', check_prim=True, only_check_prim=True)


class TestFloor_ZeroDim_Prim(TestFloor_Prim):
    def init_shape(self):
        self.shape = []


class TestFloorFp16_Prim(TestFloor_Prim):
    def init_dtype(self):
        self.dtype = np.float16


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [11, 17], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()
        self.init_shape()
        # prim not supported yet
        self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
C
add cos  
chengduoZH 已提交
1564

1565 1566 1567
    def init_shape(self):
        self.shape = [10, 12]

C
add cos  
chengduoZH 已提交
1568
    def test_check_grad(self):
1569 1570
        if self.dtype == np.float16:
            return
1571
        self.check_grad(['X'], 'Out')
C
add cos  
chengduoZH 已提交
1572 1573


1574 1575 1576 1577 1578
class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.python_api = paddle.nn.functional.relu
        self.prim_op_type = "comp"
        self.init_dtype()
        self.init_shape()
        self.skip_cinn()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output(check_prim=True)

    def skip_cinn(self):
        self.enable_cinn = False


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []

    def skip_cinn(self):
        self.enable_cinn = False


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out
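

# Illustrative sketch, not part of the test suite: the reference above scales
# only the negative entries in place, which is the same as an element-wise
# np.where(x >= 0, x, alpha * x). The helper name below is ours, added purely
# for illustration.
def _leaky_relu_where_sketch(x, alpha=0.01):
    # Equivalent closed form of ref_leaky_relu for any alpha.
    return np.where(x >= 0, x, alpha * x)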


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
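

# Illustrative sketch, not part of the test suite: the approximate=True branch
# above is the tanh-based GELU approximation, and it tracks the exact erf form
# closely. A quick numerical comparison makes that visible. The helper name is
# ours and the tolerance mentioned below is an informal observation, not a
# tested guarantee.
def _gelu_approx_gap_sketch(num_samples=1000, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.uniform(-3, 3, [num_samples]).astype('float64')
    # Maximum gap between the approximate and exact branches of gelu() on this
    # range; it stays well below 1e-2.
    return np.max(np.abs(gelu(x, True) - gelu(x, False)))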


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
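

# Illustrative sketch, not part of the test suite: the value ref_relu6 finally
# returns is just the input clipped to [0, threshold], so np.clip reproduces
# it exactly. The helper name is ours.
def _relu6_clip_sketch(x, threshold=6.0):
    # Same values as the min/max composition used in ref_relu6.
    return np.clip(x, 0.0, threshold)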


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.relu6(x_fp16)


class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
        with warnings.catch_warnings(record=True) as context:
            warnings.simplefilter("always")

            paddle.enable_static()
            helper = LayerHelper("relu6")
            data = paddle.static.data(
                name='data', shape=[None, 3, 32, 32], dtype='float32'
            )
            out = helper.create_variable_for_type_inference(dtype=data.dtype)
            os.environ['FLAGS_print_extra_attrs'] = "1"
            helper.append_op(
                type="relu6",
                inputs={'X': data},
                outputs={'Out': out},
                attrs={'threshold': 6.0},
            )
            self.assertTrue(
                "op relu6 use extra_attr: threshold" in str(context[-1].message)
            )
            os.environ['FLAGS_print_extra_attrs'] = "0"


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
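

# Illustrative sketch, not part of the test suite: hard_swish evaluates
# x * clip(x + offset, 0, threshold) / scale, so with the defaults it is 0 for
# x <= -3, the identity for x >= 3, and a ramp in between (for example, 1 maps
# to 4 / 6). The helper name is ours.
def _hardswish_pointwise_sketch(x, threshold=6.0, scale=6.0, offset=3.0):
    # Same arithmetic as ref_hardswish, written with np.clip.
    return x * np.clip(x + offset, 0.0, threshold) / scale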


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}
        self.enable_cinn = False

    def init_shape(self):
        self.shape = [10, 12]

    def if_only_check_prim(self):
        return False

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            check_eager=True,
            check_prim=True,
            only_check_prim=self.if_only_check_prim(),
        )

    def test_check_output(self):
        self.check_output(check_eager=True, check_prim=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


class TestHardSwishFP16(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def if_only_check_prim(self):
        return True

    def init_dtype(self):
        self.dtype = np.float16


class TestHardSwish_ZeroDim_FP16(TestHardSwishFP16):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardswish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardswish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log(np.exp(t) + 1)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
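

# Illustrative sketch, not part of the test suite: the analytic gradient of the
# elu reference above is 1 for x > 0 and alpha * exp(x) otherwise; with
# alpha = 1 both branches meet at 1 when x = 0, which is why TestELU below does
# not nudge inputs away from zero. The helper name is ours.
def _elu_grad_sketch(x, alpha=1.0):
    # Piecewise derivative of elu(x, alpha).
    return np.where(x > 0, 1.0, alpha * np.exp(x))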


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.elu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
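

# Illustrative sketch, not part of the test suite: celu rescales the negative
# branch by alpha inside the exponential, and for alpha = 1 it collapses to the
# elu reference defined earlier in this file. The helper name is ours.
def _celu_matches_elu_sketch(x):
    # Expected to hold for any finite float input batch.
    return np.allclose(celu(x, 1.0), elu(x, 1.0))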


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must be not equal 0
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[10, 12], dtype='float32'
            )
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.celu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.prim_op_type = "prim"
        self.python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        if len(self.shape) == 0:
            # for 0-D tensor, skip cinn testing
            self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log, in1)
        self.assertRaises(TypeError, paddle.log, in2)


class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        paddle.enable_static()
        with static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            x = [[2, 3, 4], [7, 8, 9]]
            x = paddle.to_tensor(x, dtype='float16')
            out = paddle.log(x)
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
                exe = paddle.static.Executor(place)
                (res,) = exe.run(fetch_list=[out])


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log10(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        paddle.enable_static()
        with static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            x = [[2, 3, 4], [7, 8, 9]]
            x = paddle.to_tensor(x, dtype='float16')
            out = paddle.log1p(x)
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
                exe = paddle.static.Executor(place)
                (res,) = exe.run(fetch_list=[out])


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x",
                shape=[11, 17],
                dtype="float64",
            )

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (res1,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.007, check_eager=True
        )

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
        )


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32"),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
        res = paddle.static.data(name="res", shape=[11, 17], dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = paddle.pow(x, factor_1)
        out_2 = paddle.pow(x, factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6],
        )

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out
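

# Illustrative sketch, not part of the test suite: ref_stanh is a scaled tanh,
# so its output magnitude never exceeds scale_b and its slope at the origin is
# scale_a * scale_b (about 1.15 with the defaults). The helper name is ours.
def _stanh_bound_sketch(x, scale_a=0.67, scale_b=1.7159):
    # tanh is bounded by 1 in magnitude, so the scaled output is bounded by
    # scale_b in magnitude.
    return np.all(np.abs(ref_stanh(x, scale_a, scale_b)) <= scale_b)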


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


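# Reference softplus: log(1 + exp(beta * x)) / beta. For beta * x above
# `threshold`, exp() would overflow and softplus(x) is numerically just x,
# so the reference (like the op's attribute) returns x directly in that branch.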
def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


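# bfloat16 values are fed to the op as uint16 bit patterns (via
# convert_float_to_uint16), which is why init_dtype below reports np.uint16
# and the checks run only on a CUDA place.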
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softplus, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softplus(x_fp16)


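# softsign(x) = x / (1 + |x|): smooth, bounded in (-1, 1), and simple enough
# that the NumPy reference below serves both the op test and the API tests.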
def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softsign, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softsign(x_fp16)


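# Reference ThresholdedReLU: passes x through where x > threshold and outputs
# 0 elsewhere (the boolean mask below multiplies element-wise).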
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.thresholded_relu(x_fp16)


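# hard_sigmoid is a piecewise-linear surrogate for the sigmoid:
# clip(slope * x + offset, 0, 1). The defaults (slope close to 1/6, offset 0.5)
# match the op's defaults, and the clip corners are exactly the thresholds the
# op test below nudges inputs away from before the gradient check.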
def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardsigmoid(x_fp16)


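# swish(x) = x * sigmoid(x); scipy's expit is the logistic sigmoid, so this
# reference matches the op with beta = 1.0 (the attr TestSwish sets below).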
def ref_swish(x):
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.swish, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.swish(x_fp16)


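# mish(x) = x * tanh(softplus(x)); the same large-input shortcut as
# ref_softplus keeps exp() from overflowing for x > threshold.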
def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.mish, x_int32)
            # float16 input is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.mish(x_fp16)


# ------------------ Test Cudnn Activation----------------------
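# Each factory below stamps out a variant of an existing test class: it
# subclasses `parent`, tweaks the kernel or dtype, renames the subclass, and
# registers it in globals() so unittest discovery picks it up. For example,
# create_test_act_cudnn_class(TestRelu) registers a class named TestRelu_cudnn.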
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
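# The fp16 variants reuse each parent's setUp but switch the dtype to float16,
# loosen the output/gradient tolerances (atol, grad_atol), and skip themselves
# when the device cannot run float16. Registering one more variant is a
# one-liner, e.g. (with a hypothetical test class):
#   create_test_act_fp16_class(TestSomeAct, atol=1e-2, grad_check=False)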
def create_test_act_fp16_class(
    parent,
    atol=1e-3,
    grad_check=True,
    check_prim=False,
    enable_cinn=True,
    grad_atol=0.80,
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def if_skip_cinn(self):
            self.enable_cinn = enable_cinn

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(
                    place, atol=atol, check_prim=check_prim
                )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    check_prim=check_prim,
                    max_relative_error=grad_atol,
                )

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid, check_prim=True)
create_test_act_fp16_class(TestSilu, check_prim=True)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs, check_prim=True)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, check_prim=True, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu, check_prim=True)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


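# The bf16 variants follow the same factory pattern, using np.uint16 storage
# for bfloat16 data; their checks run on a CUDA place only and use a looser
# atol by default.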
def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActBF16(parent):
        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)
create_test_act_bf16_class(TestAbs)

if __name__ == "__main__":
    unittest.main()