#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
from scipy.special import erf, expit

import paddle
import paddle.nn.functional as F
from paddle import fluid, static
from paddle.fluid import Program, core, program_guard
from paddle.fluid.layer_helper import LayerHelper


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program(), Program()):
                # The input type of sqrt op must be Variable or numpy.ndarray.
                in1 = 1
                self.assertRaises(TypeError, paddle.sqrt, in1)
                # The input dtype of sqrt op must be float16, float32, float64.
                in2 = paddle.static.data(
                    name='input2', shape=[-1, 12, 10], dtype="int32"
                )
                self.assertRaises(TypeError, paddle.sqrt, in2)

                in3 = paddle.static.data(
                    name='input3', shape=[-1, 12, 10], dtype="float16"
                )
                paddle.sqrt(x=in3)


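# Base class for the activation op tests below: setUp builds a random input,
# computes a NumPy reference output for the exp op, and subclasses override
# op_type, python_api, init_dtype/init_shape and the reference computation.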
class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.if_enable_cinn()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass

    def convert_input_output(self):
        pass

    def if_enable_cinn(self):
        pass


class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


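# The *_Prim variants set prim_op_type and pass check_prim=True so the op is
# also verified against its composite/primitive decomposition.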
class TestExpFp32_Prim(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.if_enable_cinn()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

    def if_enable_cinn(self):
        pass


class TestExpFp64_Prim(TestExpFp32_Prim):
    def init_dtype(self):
        self.dtype = np.float64


class TestExpPrim_ZeroDim(TestExpFp32_Prim):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def test_check_output(self):
        self.check_output()


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        def run(place):
            with paddle.fluid.framework._static_guard():
                with paddle.static.program_guard(paddle.static.Program()):
                    X = paddle.static.data('X', self.shape, dtype=self.dtype)
                    out = paddle.expm1(X)
                    exe = paddle.static.Executor(place)
                    res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)

        for place in self.place:
            run(place)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.static.data('X', self.shape, dtype='int32')
                self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter:
    def test_out_name(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = eval("paddle.%s(data, name='Y')" % self.op_type)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = eval("np.%s(np_x)" % self.op_type)
                np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


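# bfloat16 variant: inputs/outputs are stored as uint16 via
# convert_float_to_uint16 and the test only runs when CUDA is available.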
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        # elementwise_pow doesn't support bfloat16, skip check_prim here.
        self.check_output_with_place(place, check_prim=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.silu
        self.public_python_api = paddle.nn.functional.silu
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.silu(x)
                m = paddle.nn.Silu()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = self.x_np / (1 + np.exp(-self.x_np))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.silu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.silu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.python_api = paddle.nn.functional.log_sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.log_sigmoid(x)
                m = paddle.nn.LogSigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.log_sigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.log_sigmoid, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.prim_op_type = "prim"
        self.python_api = paddle.tanh
        self.public_python_api = paddle.tanh
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], self.dtype)
                out1 = self.tanh(x)
                th = paddle.nn.Tanh()
                out2 = th(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.tanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.tanh, 1)
                # The input dtype must be float16, float32.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, self.tanh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.python_api = paddle.atan
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = paddle.atan(data, name='Y')
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = np.arctan(np_x)
                self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.python_api = paddle.sinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_sinh_out = paddle.sinh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_sinh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_sinh_out],
                )

            expected_res = np.sinh(input_x)
            np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.sinh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.sinh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.python_api = paddle.cosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_cosh_out = paddle.cosh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_cosh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_cosh_out],
                )

            expected_res = np.cosh(input_x)
            np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.cosh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.cosh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.cosh(x_fp16)


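# NumPy reference used by the tanhshrink tests: tanhshrink(x) = x - tanh(x).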
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.python_api = paddle.nn.functional.tanhshrink
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.tanhshrink(x)
                tanhshrink = paddle.nn.Tanhshrink()
                out2 = tanhshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_tanhshrink(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.tanhshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.tanhshrink, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.tanhshrink(x_fp16)


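# NumPy reference used by the hardshrink tests: values inside
# [-threshold, threshold] are zeroed, the rest pass through unchanged.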
def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.python_api = paddle.nn.functional.hardshrink
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.attrs = {'threshold': self.threshold}

        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardshrink(x)
                hd = paddle.nn.Hardshrink()
                out2 = hd(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardshrink(self.x_np, 0.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardshrink, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardshrink(x_fp16)


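# NumPy reference used by the hardtanh tests: clip x to [min, max].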
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardtanh(x)
                m = paddle.nn.Hardtanh()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardtanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardtanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardtanh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardtanh(x_fp16)


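# NumPy reference used by the softshrink tests: values outside
# [-threshold, threshold] are shifted toward zero by threshold, the rest are zeroed.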
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.attrs = {"lambda": threshold}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softshrink(x, self.threshold)
                softshrink = paddle.nn.Softshrink(self.threshold)
                out2 = softshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softshrink(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
1145
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1146

1147
    def test_errors(self):
1148
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1149 1150 1151 1152
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softshrink, 1)
                # The input dtype must be float16, float32, float64.
1153
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
1154 1155 1156 1157
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softshrink, x_int32)
                # The threshold must be no less than zero
1158
                x_fp32 = paddle.static.data(
W
wanghuancoder 已提交
1159 1160 1161 1162
                    name='x_fp32', shape=[12, 10], dtype='float32'
                )
                self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
                # support the input dtype is float16
1163
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
1164 1165 1166
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softshrink(x_fp16)
1167 1168


1169
class TestSqrt(TestActivation, TestParameter):
1170 1171
    def setUp(self):
        self.op_type = "sqrt"
1172
        self.prim_op_type = "prim"
1173
        self.python_api = paddle.sqrt
1174 1175
        self.public_python_api = paddle.sqrt

1176
        self.init_dtype()
1177
        self.init_shape()
1178
        self.if_enable_cinn()
1179

1180
        np.random.seed(1023)
1181
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
1182 1183 1184 1185
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1186
        self.convert_input_output()
1187

1188 1189 1190
    def if_enable_cinn(self):
        pass

1191
    def test_check_grad(self):
1192 1193
        if self.dtype == np.float16:
            return
1194
        self.check_grad(['X'], 'Out', check_prim=True)
1195 1196

    def test_check_output(self):
W
wanghuancoder 已提交
1197
        self.check_output()
1198

1199

1200 1201 1202 1203 1204
class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
1205
        self.public_python_api = paddle.sqrt
1206 1207
        self.init_dtype()
        self.init_shape()
1208
        self.if_enable_cinn()
1209 1210 1211 1212 1213 1214 1215 1216 1217 1218
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1219
        self.check_grad(['X'], 'Out', check_prim=True)
1220 1221

    def test_check_output(self):
W
wanghuancoder 已提交
1222
        self.check_output()
1223 1224 1225 1226

    def init_dtype(self):
        self.dtype = np.float32

1227 1228 1229
    def if_enable_cinn(self):
        pass

1230

1231 1232 1233 1234
class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []

1235
    def if_enable_cinn(self):
1236
        self.enable_cinn = False
1237 1238


1239 1240 1241
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
1242 1243 1244
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
1245
        self.prim_op_type = "prim"
1246
        self.python_api = paddle.sqrt
1247
        self.public_python_api = paddle.sqrt
1248
        self.init_dtype()
1249
        self.init_shape()
1250
        self.if_enable_cinn()
1251 1252

        np.random.seed(1023)
1253
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
1254 1255 1256 1257 1258 1259 1260 1261 1262 1263
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

1264 1265 1266
    def init_shape(self):
        self.shape = [11, 17]

1267 1268 1269
    def if_enable_cinn(self):
        self.enable_cinn = False

1270 1271
    def test_check_output(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
1272
        self.check_output_with_place(place)
1273 1274 1275

    def test_check_grad(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
1276
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
1277 1278


M
mhy-666 已提交
1279 1280 1281 1282 1283
class TestSqrtComp(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
1284
        self.public_python_api = paddle.sqrt
M
mhy-666 已提交
1285 1286
        self.init_dtype()
        self.init_shape()
1287
        self.if_enable_cinn()
M
mhy-666 已提交
1288 1289 1290 1291 1292 1293 1294

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1295
        self.convert_input_output()
1296 1297 1298

    def if_enable_cinn(self):
        pass
M
mhy-666 已提交
1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)


class TestSqrtCompFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
1314
        self.public_python_api = paddle.sqrt
M
mhy-666 已提交
1315 1316
        self.init_dtype()
        self.init_shape()
1317
        self.if_enable_cinn()
M
mhy-666 已提交
1318 1319 1320 1321 1322 1323
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1324 1325 1326

    def if_enable_cinn(self):
        pass
M
mhy-666 已提交
1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32


Z
zhoukunsheng 已提交
1340 1341 1342
class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
1343
        self.prim_op_type = "comp"
Z
zyfncg 已提交
1344
        self.python_api = paddle.rsqrt
1345
        self.public_python_api = paddle.rsqrt
Z
zhoukunsheng 已提交
1346
        self.init_dtype()
1347
        self.init_shape()
1348
        self.if_enable_cinn()
Z
zhoukunsheng 已提交
1349

1350
        np.random.seed(1024)
1351
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
Z
zhoukunsheng 已提交
1352 1353 1354 1355
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1356
        self.convert_input_output()
Z
zhoukunsheng 已提交
1357

1358 1359 1360
    def init_shape(self):
        self.shape = [10, 12]

1361 1362 1363
    def if_enable_cinn(self):
        pass

1364 1365 1366
    def test_check_output(self):
        self.check_output(check_prim=True)

Z
zhoukunsheng 已提交
1367 1368 1369
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1370 1371 1372 1373 1374 1375
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.0005,
            check_prim=True,
        )
Z
zhoukunsheng 已提交
1376 1377


1378 1379 1380
class TestRsqrt_ZeroDim(TestRsqrt):
    def init_shape(self):
        self.shape = []
1381 1382 1383

    def if_enable_cinn(self):
        self.enable_cinn = False
1384 1385


C
chengduo 已提交
1386
class TestAbs(TestActivation):
1387 1388
    def setUp(self):
        self.op_type = "abs"
1389 1390
        self.prim_op_type = "prim"
        self.python_api = paddle.abs
1391
        self.public_python_api = paddle.abs
1392
        self.init_dtype()
1393
        self.init_shape()
1394
        self.if_enable_cinn()
1395

1396
        np.random.seed(1024)
1397
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
C
chengduo 已提交
1398
        # Because we set delta = 0.005 in calculating numeric gradient,
Q
qijun 已提交
1399
        # if x is too small, such as 0.002, x_neg will be -0.003
C
chengduo 已提交
1400
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
Q
qijun 已提交
1401 1402
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
1403 1404 1405 1406
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1407
        self.convert_input_output()
1408

1409 1410 1411
    def init_shape(self):
        self.shape = [4, 25]

1412 1413 1414
    def if_enable_cinn(self):
        pass

1415
    def test_check_grad(self):
1416 1417
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1418
        self.check_grad(['X'], 'Out', check_prim=True)
1419

1420

1421 1422 1423 1424
class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []

1425 1426 1427
    def if_enable_cinn(self):
        self.enable_cinn = False

1428

C
chengduo 已提交
1429
class TestCeil(TestActivation):
D
dzhwinter 已提交
1430 1431
    def setUp(self):
        self.op_type = "ceil"
1432
        self.python_api = paddle.ceil
1433
        self.init_dtype()
1434
        self.init_shape()
1435

1436
        np.random.seed(1024)
1437
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1438 1439 1440 1441
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1442
        self.convert_input_output()
D
dzhwinter 已提交
1443

1444 1445 1446
    def init_shape(self):
        self.shape = [10, 12]

D
dzhwinter 已提交
1447
    # The same reason with TestFloor
C
chengduo 已提交
1448
    def test_check_grad(self):
1449 1450 1451
        pass


1452 1453 1454 1455 1456
class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1457
class TestFloor(TestActivation):
D
dzhwinter 已提交
1458 1459
    def setUp(self):
        self.op_type = "floor"
1460
        self.prim_op_type = "prim"
1461
        self.python_api = paddle.floor
1462
        self.public_python_api = paddle.floor
1463
        self.init_dtype()
1464
        self.init_shape()
1465
        self.if_enable_cinn()
1466

1467
        np.random.seed(1024)
1468
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1469 1470 1471 1472
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1473
        self.convert_input_output()
D
dzhwinter 已提交
1474

1475 1476 1477
    def init_shape(self):
        self.shape = [10, 12]

1478 1479 1480
    def if_enable_cinn(self):
        pass

D
dzhwinter 已提交
1481
    # the gradient on floor, ceil, round is undefined.
1482
    # we return zero as gradient, but the numpy return nan
C
chengduo 已提交
1483 1484
    # The same reason with TestFloor
    def test_check_grad(self):
1485 1486
        pass

1487
    def test_check_grad_for_prim(self):
1488 1489 1490 1491
        # The gradient of floor, ceil and round is undefined; the op returns
        # zero as the gradient while numpy would give nan.
        # For prim we compare against the eager python api result, so the
        # only_check_prim flag is used to run only the prim check.
        if core.is_compiled_with_cuda():
            self.check_grad_with_place(
                paddle.CUDAPlace(0),
                ['X'],
                'Out',
                check_prim=True,
                only_check_prim=True,
            )


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.python_api = paddle.cos
        self.public_python_api = paddle.cos
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        # prim not support now
        self.enable_cinn = False
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.python_api = paddle.tan
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], self.dtype)
                out = paddle.tan(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = np.tan(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.python_api = paddle.acos
        self.init_dtype()
        self.init_shape()
        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.python_api = paddle.sin
        self.public_python_api = paddle.sin
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        # prim not support now
        self.enable_cinn = False
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.python_api = paddle.asin
        self.init_dtype()
        self.init_shape()
        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.python_api = paddle.acosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.python_api = paddle.asinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.python_api = paddle.atanh
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.python_api = paddle.nn.functional.relu
        self.prim_op_type = "comp"
        self.public_python_api = paddle.nn.functional.relu
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)
        self.inputs = {'X': x}

        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output(check_prim=True)

    def if_enable_cinn(self):
        pass


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False

class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.relu(x)
                m = paddle.nn.ReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.maximum(self.x_np, 0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
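    # Reference implementation: negative entries are scaled by alpha,
    # non-negative entries pass through unchanged.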
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.python_api = paddle.nn.functional.leaky_relu
        self.public_python_api = paddle.nn.functional.leaky_relu
        self.prim_op_type = "comp"
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        alpha = self.get_alpha()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}
        self.convert_input_output()
    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False

class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.leaky_relu(x)
                m = paddle.nn.LeakyReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_leaky_relu(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.leaky_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.leaky_relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.leaky_relu(x_fp16)


def gelu(x, approximate):
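    # Reference implementation: the approximate branch uses the tanh-based
    # formula 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))); the
    # exact branch uses the erf-based definition 0.5 * x * (1 + erf(x / sqrt(2))).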
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu device, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
        # Cumulative error occurs between comp and cinn, so that we also set cinn_rtol to 1e-8 as rev_comp_rtol = 1e-8
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8
    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)
        self.if_enable_cinn()
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {"approximate": approximate}
        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
        # Cumulative error occurs between comp and cinn, so that we also set cinn_rtol to 1e-8 as rev_comp_rtol = 1e-8
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8
    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False

class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.enable_cinn = False

        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], dtype="float32")
                out1 = F.gelu(x)
                m = paddle.nn.GELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = gelu(self.x_np, False)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.gelu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.gelu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.python_api = paddle.nn.functional.hardtanh
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
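        # Reference brelu (hardtanh): the expected output clips x into [t_min, t_max].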
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': t}
        self.convert_input_output()
        self.attrs = {'t_min': t_min, 't_max': t_max}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def ref_relu6(x, threshold=6.0):
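    # Reference implementation: out = min(max(x, 0), threshold).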
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6
        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)
        self.attrs = {'threshold': 6.0}

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.relu6(x)
                relu6 = paddle.nn.ReLU6()
                out2 = relu6(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_relu6(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.relu6(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_relu6(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.relu6, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.relu6, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.relu6(x_fp16)


class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
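        # The test sets FLAGS_print_extra_attrs=1, appends a relu6 op with an
        # explicit 'threshold' attribute, and checks that append_op warns
        # about the extra attribute.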
        with paddle.fluid.framework._static_guard():
            with warnings.catch_warnings(record=True) as context:
                warnings.simplefilter("always")

                helper = LayerHelper("relu6")
                data = paddle.static.data(
                    name='data', shape=[None, 3, 32, 32], dtype='float32'
                )
                out = helper.create_variable_for_type_inference(
                    dtype=data.dtype
                )
                os.environ['FLAGS_print_extra_attrs'] = "1"
                helper.append_op(
                    type="relu6",
                    inputs={'X': data},
                    outputs={'Out': out},
                    attrs={'threshold': 6.0},
                )
                self.assertTrue(
                    "op relu6 use extra_attr: threshold"
                    in str(context[-1].message)
                )
                os.environ['FLAGS_print_extra_attrs'] = "0"


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
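    # Reference implementation: x * clip(x + offset, 0, threshold) / scale,
    # computed in float32 when the input is float16.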
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.hardswish
        self.public_python_api = paddle.nn.functional.hardswish
        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.enable_cinn = False
    def init_shape(self):
        self.shape = [10, 12]

    def if_only_check_prim(self):
        return False

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=self.if_only_check_prim(),
        )

    def test_check_output(self):
        self.check_output(check_prim=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardswish(x)
                m = paddle.nn.Hardswish()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardswish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor([11648.0, 11448.0])
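        # For inputs well above threshold - offset, hardswish is the identity,
        # so the expected output equals the input values.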
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardswish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardswish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardswish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardswish, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardswish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
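        # Reference soft_relu: softplus of x clipped into [-threshold, threshold].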
        out = np.log(np.exp(t) + 1)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'threshold': threshold}
    def test_check_output(self):
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.02, check_dygraph=False
        )


def elu(x, alpha):
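    # Reference implementation: x for x > 0, otherwise alpha * (exp(x) - 1).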
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.elu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'alpha': alpha}
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')
    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2

class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.elu(x)
                m = paddle.nn.ELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = elu(self.x_np, 1.0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.elu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.elu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)


def celu(x, alpha):
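    # Reference implementation: max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).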
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'alpha': alpha}
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.celu(x, 1.5)
                m = paddle.nn.CELU(1.5)
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = celu(self.x_np, 1.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.celu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.celu, x_int32)
                # The alpha must be not equal 0
                x_fp32 = paddle.static.data(
                    name='x_fp32', shape=[10, 12], dtype='float32'
                )
                self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.celu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()
        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def test_check_output(self):
        self.check_output()


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.prim_op_type = "prim"
        self.python_api = paddle.log
        self.public_python_api = paddle.log
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)
    def test_error(self):
        with paddle.fluid.framework._static_guard():
            in1 = paddle.static.data(
                name="in1", shape=[11, 17], dtype="int32"
            )
            in2 = paddle.static.data(
                name="in2", shape=[11, 17], dtype="int64"
            )

            self.assertRaises(TypeError, paddle.log, in1)
            self.assertRaises(TypeError, paddle.log, in2)


class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False

class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_error(self):
        with paddle.fluid.framework._static_guard():
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

            self.assertRaises(TypeError, paddle.log2, in1)
            self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log2(data_x)
                exe = paddle.static.Executor(place=fluid.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log2(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
    def test_error(self):
        with paddle.fluid.framework._static_guard():
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

            self.assertRaises(TypeError, paddle.log10, in1)
            self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log10(data_x)
                exe = paddle.static.Executor(place=paddle.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log10(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2949 2950 2951 2952 2953 2954 2955 2956

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
2957
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2958 2959


2960 2961 2962
class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log1p(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x",
                    shape=[11, 17],
                    dtype="float64",
                )

                out1 = paddle.log1p(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (res1,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log1p(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def test_check_output(self):
        self.check_output()


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.prim_op_type = "comp"
        self.python_api = paddle.pow
        self.public_python_api = paddle.pow
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'factor': 3.0}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.enable_cinn = False
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype(self.dtype),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            input = np.random.uniform(1, 2, [11, 17]).astype("float32")
            x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
            res = paddle.static.data(
                name="res", shape=[11, 17], dtype="float32"
            )

            factor_1 = 2.0
            factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0)
            out_1 = paddle.pow(x, factor_1)
            out_2 = paddle.pow(x, factor_2)
            out_4 = paddle.pow(x, factor_1, name='pow_res')
            out_6 = paddle.pow(x, factor_2)
            self.assertEqual(('pow_res' in out_4.name), True)

            exe = fluid.Executor(place=fluid.CPUPlace())
            res_1, res_2, res, res_6 = exe.run(
                fluid.default_main_program(),
                feed={"x": input},
                fetch_list=[out_1, out_2, res, out_6],
            )

            assert np.allclose(res_1, np.power(input, 2))
            assert np.allclose(res_2, np.power(input, 3))
            assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
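    # Scaled tanh reference: out = scale_b * tanh(scale_a * x).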
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.python_api = paddle.stanh
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # Same reason as in TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12])
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.stanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.stanh, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
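    # softplus(x) = log(1 + exp(beta * x)) / beta; when beta * x exceeds the
    # threshold the input is returned unchanged to avoid overflow in exp().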
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.python_api = paddle.nn.functional.softplus

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softplus(x, self.beta, self.threshold)
                softplus = paddle.nn.Softplus(self.beta, self.threshold)
                out2 = softplus(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softplus, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softplus, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softplus(x_fp16)


def ref_softsign(x):
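    # softsign(x) = x / (1 + |x|).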
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softsign(x)
                softsign = paddle.nn.Softsign()
                out2 = softsign(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softsign(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softsign, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softsign, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
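    # Keep values strictly greater than the threshold; zero out everything else.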
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.thresholded_relu

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"threshold": threshold}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.thresholded_relu(x, self.threshold)
                thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
                out2 = thresholded_relu(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.thresholded_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.thresholded_relu, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
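    # Piecewise-linear sigmoid approximation: clip(slope * x + offset, 0, 1).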
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardsigmoid

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardsigmoid(x)
                m = paddle.nn.Hardsigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardsigmoid(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardsigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardsigmoid, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardsigmoid(x_fp16)


def ref_swish(x):
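    # swish(x) = x * sigmoid(x); expit is SciPy's logistic sigmoid.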
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'beta': 1.0}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.swish(x)
                swish = paddle.nn.Swish()
                out2 = swish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_swish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.swish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_swish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.swish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.swish, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
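    # mish(x) = x * tanh(softplus(x)); softplus falls back to x above the
    # threshold to avoid overflow in exp().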
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.mish(x)
                mish = paddle.nn.Mish()
                out2 = mish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_mish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.mish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_mish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.mish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.mish, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.mish(x_fp16)


# ------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
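    # Derive a CUDA-only variant of the parent test that runs the op with
    # use_cudnn=True, and register it in globals() so unittest discovers it.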
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{}_{}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(
    parent,
    atol=1e-3,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
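    # Generate an FP16 variant of the parent test with relaxed tolerances;
    # extra keyword arguments are set as attributes on the test in setUp().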
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(
                    place,
                    atol=atol,
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                    max_relative_error=grad_atol,
                )

    cls_name = "{}_{}".format(parent.__name__, "FP16OP")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSilu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSqrtComp, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestAbs, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(
    TestFloor, check_prim=True, grad_check=False, enable_cinn=True
)
create_test_act_fp16_class(TestCos)
create_test_act_fp16_class(TestTan)
create_test_act_fp16_class(TestCosh)
create_test_act_fp16_class(TestAcos)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh)
create_test_act_fp16_class(TestAsinh)
create_test_act_fp16_class(TestAtanh)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestGelu,
    check_prim=True,
    enable_cinn=True,
    rev_comp_rtol=1e-3,
    rev_comp_atol=1e-3,
    cinn_rtol=1e-3,
    cinn_atol=1e-3,
)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, check_dygraph=False)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2)
else:
    create_test_act_fp16_class(TestLog2)
create_test_act_fp16_class(TestLog10)
create_test_act_fp16_class(TestLog1p)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, check_prim=True)
create_test_act_fp16_class(TestPow_factor_tensor)
create_test_act_fp16_class(TestSTanh)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish, check_prim=True)
create_test_act_fp16_class(TestMish)
create_test_act_fp16_class(TestLeakyRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestLeakyReluAlpha1, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha2, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha3, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_fp16_class(TestRsqrt, check_prim=True, enable_cinn=True)


def create_test_act_bf16_class(
    parent,
    atol=1e-2,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
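    # Generate a BF16 variant of the parent test; it only runs on CUDA places
    # that support bfloat16, and it re-encodes the data via convert_input_output.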
    @unittest.skipIf(
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA or does not support bfloat16",
    )
    class TestActBF16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float32

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def convert_input_output(self):
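            # Re-encode the float32 inputs/outputs as bfloat16 bit patterns
            # stored in uint16, as expected by the BF16 kernels.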
            self.inputs = {'X': convert_float_to_uint16(self.inputs['X'])}
            self.outputs = {'Out': convert_float_to_uint16(self.outputs['Out'])}
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=atol, check_prim=check_prim
            )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    max_relative_error=grad_atol,
                    check_prim=check_prim,
                )

    cls_name = "{}_{}".format(parent.__name__, "BF16OP")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestActivation)
create_test_act_bf16_class(TestExpm1)
create_test_act_bf16_class(TestSigmoid, check_prim=True)
create_test_act_bf16_class(TestSilu, check_prim=True)
create_test_act_bf16_class(TestLogSigmoid)
create_test_act_bf16_class(TestTanh, check_prim=True)
create_test_act_bf16_class(TestTanhshrink)
create_test_act_bf16_class(TestHardShrink)
create_test_act_bf16_class(TestSoftshrink)
create_test_act_bf16_class(TestSqrt, check_prim=True)
create_test_act_bf16_class(TestSqrtComp, check_prim=True)
create_test_act_bf16_class(TestAbs, check_prim=True)
create_test_act_bf16_class(TestCeil, grad_check=False)
create_test_act_bf16_class(TestFloor, grad_check=False, check_prim=True)
create_test_act_bf16_class(TestCos)
create_test_act_bf16_class(TestTan)
create_test_act_bf16_class(TestCosh)
create_test_act_bf16_class(TestAcos)
create_test_act_bf16_class(TestSin)
create_test_act_bf16_class(TestSinh)
create_test_act_bf16_class(TestAsin)
create_test_act_bf16_class(TestAtan)
create_test_act_bf16_class(TestAcosh)
create_test_act_bf16_class(TestAsinh)
create_test_act_bf16_class(TestAtanh)
create_test_act_bf16_class(TestRound, grad_check=False)
create_test_act_bf16_class(TestRelu, check_prim=True)
create_test_act_bf16_class(
    TestGelu,
    check_prim=True,
    rev_comp_rtol=1e-2,
    rev_comp_atol=1e-2,
    cinn_rtol=1e-2,
    cinn_atol=1e-2,
)
create_test_act_bf16_class(TestBRelu)
create_test_act_bf16_class(TestRelu6)
create_test_act_bf16_class(TestSoftRelu, check_dygraph=False)
create_test_act_bf16_class(TestELU)
create_test_act_bf16_class(TestCELU)
create_test_act_bf16_class(TestReciprocal)
create_test_act_bf16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_bf16_class(TestLog2)
else:
    create_test_act_bf16_class(TestLog2)
create_test_act_bf16_class(TestLog10)
create_test_act_bf16_class(TestLog1p)
create_test_act_bf16_class(TestSquare)
create_test_act_bf16_class(TestPow, check_prim=True)
create_test_act_bf16_class(TestPow_factor_tensor)
create_test_act_bf16_class(TestSTanh)
create_test_act_bf16_class(TestSoftplus)
create_test_act_bf16_class(TestSoftsign)
create_test_act_bf16_class(TestThresholdedRelu)
create_test_act_bf16_class(TestHardSigmoid)
create_test_act_bf16_class(TestSwish)
create_test_act_bf16_class(TestHardSwish, check_prim=True)
create_test_act_bf16_class(TestMish)
create_test_act_bf16_class(TestLeakyRelu, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha1, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha2, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha3, check_prim=True)
create_test_act_bf16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_bf16_class(TestRsqrt, check_prim=True)
if __name__ == "__main__":
    unittest.main()