#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings
from contextlib import contextmanager

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
from scipy.special import erf, expit

import paddle
import paddle.nn.functional as F
from paddle import fluid, static
from paddle.fluid import Program, core, program_guard
from paddle.fluid.layer_helper import LayerHelper


@contextmanager
def dynamic_guad():
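    # Switch to dygraph mode for the duration of the block and always restore
    # static mode on exit, so a failing test does not leave global mode state
    # behind for the next test.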
    paddle.disable_static()
    try:
        yield
    finally:
        paddle.enable_static()


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program(), Program()):
                # The input type of sqrt op must be Variable or numpy.ndarray.
                in1 = 1
                self.assertRaises(TypeError, paddle.sqrt, in1)
                # The input dtype of sqrt op must be float16, float32, float64.
                in2 = paddle.static.data(
                    name='input2', shape=[-1, 12, 10], dtype="int32"
                )
                self.assertRaises(TypeError, paddle.sqrt, in2)

                in3 = paddle.static.data(
                    name='input3', shape=[-1, 12, 10], dtype="float16"
                )
                paddle.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.if_enable_cinn()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass

    def convert_input_output(self):
        pass

    def if_enable_cinn(self):
        pass


class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


class TestExpFp32_Prim(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.if_enable_cinn()
        self.convert_input_output()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

    def if_enable_cinn(self):
        pass

    def convert_input_output(self):
        pass


class TestExpFp64_Prim(TestExpFp32_Prim):
    def init_dtype(self):
        self.dtype = np.float64


class TestExpPrim_ZeroDim(TestExpFp32_Prim):
    def init_shape(self):
        self.shape = []


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def test_check_output(self):
        self.check_output()


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        def run(place):
            with paddle.fluid.framework._static_guard():
                with paddle.static.program_guard(paddle.static.Program()):
                    X = paddle.static.data('X', self.shape, dtype=self.dtype)
                    out = paddle.expm1(X)
                    exe = paddle.static.Executor(place)
                    res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        with dynamic_guad():

            def run(place):
                X = paddle.to_tensor(self.x)
                out = paddle.expm1(X)
                np.testing.assert_allclose(
                    self.out_ref, out.numpy(), rtol=1e-05
                )

            for place in self.place:
                run(place)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.static.data('X', self.shape, dtype='int32')
                self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter:
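    # Mixin shared by several op tests below: the operator under test is looked
    # up by name via eval("paddle.%s" % self.op_type) and compared against the
    # NumPy function of the same name, so subclasses only need to set op_type.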
    def test_out_name(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = eval("paddle.%s(data, name='Y')" % self.op_type)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = eval("np.%s(np_x)" % self.op_type)
                np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
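        # bfloat16 inputs/outputs are carried as uint16 bit patterns in OpTest,
        # hence the convert_float_to_uint16 round-trip above and the np.uint16
        # dtype below.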

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        # elementwise_pow doesn't support bfloat16, skip check_prim here.
        self.check_output_with_place(place, check_prim=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.silu
        self.public_python_api = paddle.nn.functional.silu
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.silu(x)
                m = paddle.nn.Silu()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = self.x_np / (1 + np.exp(-self.x_np))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.silu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.silu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.python_api = paddle.nn.functional.log_sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.log_sigmoid(x)
                m = paddle.nn.LogSigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.log_sigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.log_sigmoid, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.prim_op_type = "prim"
        self.python_api = paddle.tanh
        self.public_python_api = paddle.tanh
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh
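        # Overridden by TestTanhInplaceAPI below to swap in paddle.tanh_, the
        # inplace variant, while reusing these checks.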

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], self.dtype)
                out1 = self.tanh(x)
                th = paddle.nn.Tanh()
                out2 = th(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.tanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_errors(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.tanh(x)
            out2 = paddle.tanh(x)
            th = paddle.nn.Tanh()
            out3 = th(x)
            out_ref = np.tanh(self.x_np)
            for r in [out1, out2, out3]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.tanh, 1)
                # The input dtype must be float16, float32.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, self.tanh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.python_api = paddle.atan
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = paddle.atan(data, name='Y')
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = np.arctan(np_x)
                self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.python_api = paddle.sinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_sinh_out = paddle.sinh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_sinh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_sinh_out],
                )

            expected_res = np.sinh(input_x)
            np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.sinh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.sinh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.python_api = paddle.cosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_cosh_out = paddle.cosh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_cosh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_cosh_out],
                )

            expected_res = np.cosh(input_x)
            np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.cosh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.cosh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.python_api = paddle.nn.functional.tanhshrink
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.tanhshrink(x)
                tanhshrink = paddle.nn.Tanhshrink()
                out2 = tanhshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_tanhshrink(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            out_ref = ref_tanhshrink(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.tanhshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.tanhshrink, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.python_api = paddle.nn.functional.hardshrink
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.attrs = {'threshold': self.threshold}

        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardshrink(x)
                hd = paddle.nn.Hardshrink()
                out2 = hd(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardshrink(self.x_np, 0.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            out_ref = ref_hardshrink(self.x_np, 0.5)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

            out1 = F.hardshrink(x, 0.6)
            hd = paddle.nn.Hardshrink(0.6)
            out2 = hd(x)
            out_ref = ref_hardshrink(self.x_np, 0.6)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardshrink, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardtanh(x)
                m = paddle.nn.Hardtanh()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardtanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            out_ref = ref_hardtanh(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

            out1 = F.hardtanh(x, -2.0, 2.0)
            m = paddle.nn.Hardtanh(-2.0, 2.0)
            out2 = m(x)
            out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardtanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardtanh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.attrs = {"lambda": threshold}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softshrink(x, self.threshold)
                softshrink = paddle.nn.Softshrink(self.threshold)
                out2 = softshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softshrink(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            out_ref = ref_softshrink(self.x_np, self.threshold)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softshrink, x_int32)
                # The threshold must be no less than zero
                x_fp32 = paddle.static.data(
                    name='x_fp32', shape=[12, 10], dtype='float32'
                )
                self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt

        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output()


class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


class TestSqrtComp(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)


class TestSqrtCompFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.rsqrt
        self.public_python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.0005,
            check_prim=True,
        )


class TestRsqrt_ZeroDim(TestRsqrt):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.prim_op_type = "prim"
        self.python_api = paddle.abs
        self.public_python_api = paddle.abs
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
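        # For example, a central-difference estimate at x = 0.002 would give
        # (|0.002 + 0.005| - |0.002 - 0.005|) / (2 * 0.005) = 0.4, while the
        # true gradient of abs there is 1.0.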
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [4, 25]

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.prim_op_type = "prim"
        self.python_api = paddle.floor
        self.public_python_api = paddle.floor
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def if_enable_cinn(self):
        pass

    # the gradient on floor, ceil, round is undefined.
    # we return zero as gradient, but the numpy return nan
    # The same reason with TestFloor
    def test_check_grad(self):
1496 1497
        pass

1498
    def test_check_grad_for_prim(self):
1499 1500 1501 1502
        # the gradient on floor, ceil, round is undefined.
        # we return zero as gradient, but the numpy return nan.
        # for prim, we compare result with eager python api,
        # so, we use only_prim flag to express we only test prim.
1503 1504 1505 1506 1507 1508 1509 1510
        if core.is_compiled_with_cuda():
            self.check_grad_with_place(
                paddle.CUDAPlace(0),
                ['X'],
                'Out',
                check_prim=True,
                only_check_prim=True,
            )
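

# Illustrative sketch (not part of the test suite): floor, ceil and round are
# piecewise constant, so Paddle defines their gradient as zero everywhere,
# which is why the numeric gradient checks above are skipped. A minimal
# dygraph check of that convention (assuming eager mode is usable here):
def _floor_zero_grad_sketch():
    with dynamic_guad():
        x = paddle.to_tensor([0.3, 1.7, -2.4], stop_gradient=False)
        y = paddle.floor(x)
        y.sum().backward()
        return x.grad.numpy()  # expected to be all zeros by convention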


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.python_api = paddle.cos
        self.public_python_api = paddle.cos
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def if_enable_cinn(self):
        pass


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
W
wanghuancoder 已提交
1556
        self.python_api = paddle.tan
J
joejiong 已提交
1557
        self.init_dtype()
1558 1559
        self.init_shape()

J
joejiong 已提交
1560
        self.dtype = 'float32'
1561
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1562 1563 1564
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
J
joejiong 已提交
1565
            else paddle.CPUPlace()
1566
        )
J
joejiong 已提交
1567 1568 1569 1570 1571

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}
1572
        self.convert_input_output()
J
joejiong 已提交
1573

1574 1575 1576
    def init_shape(self):
        self.shape = [10, 12]

J
joejiong 已提交
1577 1578 1579 1580 1581
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592

class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
1593 1594 1595
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1596
            else paddle.CPUPlace()
1597
        )
1598

J
joejiong 已提交
1599
    def test_dygraph_api(self):
1600 1601 1602 1603 1604
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out_test = paddle.tan(x)
            out_ref = np.tan(self.x_np)
            np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
J
joejiong 已提交
1605 1606

    def test_static_api(self):
1607
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1608 1609 1610 1611 1612 1613 1614
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], self.dtype)
                out = paddle.tan(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = np.tan(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
J
joejiong 已提交
1615 1616 1617 1618

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
1619 1620 1621
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
J
joejiong 已提交
1622 1623 1624 1625 1626 1627 1628 1629
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.python_api = paddle.acos
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
C
add sin  
chengduoZH 已提交
1660 1661
    def setUp(self):
        self.op_type = "sin"
W
wanghuancoder 已提交
1662
        self.python_api = paddle.sin
1663 1664
        self.public_python_api = paddle.sin
        self.prim_op_type = "prim"
1665
        self.init_dtype()
1666
        self.init_shape()
1667
        self.if_enable_cinn()
1668

1669
        np.random.seed(1024)
1670
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1671 1672 1673
        out = np.sin(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1674
        self.convert_input_output()
C
add cos  
chengduoZH 已提交
1675

1676 1677 1678
    def init_shape(self):
        self.shape = [10, 12]

C
add cos  
chengduoZH 已提交
1679
    def test_check_grad(self):
1680 1681
        if self.dtype == np.float16:
            return
1682
        self.check_grad(['X'], 'Out', check_prim=True)
C
add cos  
chengduoZH 已提交
1683

1684 1685 1686
    def if_enable_cinn(self):
        pass

C
add cos  
chengduoZH 已提交
1687

1688 1689 1690 1691 1692
class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


1693 1694 1695
class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
W
wanghuancoder 已提交
1696
        self.python_api = paddle.asin
1697
        self.init_dtype()
1698
        self.init_shape()
1699

1700
        np.random.seed(2048)
1701
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
1702 1703 1704 1705
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1706
        self.convert_input_output()
1707

1708 1709 1710
    def init_shape(self):
        self.shape = [10, 12]

1711 1712 1713
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1714
        self.check_grad(['X'], 'Out')
1715 1716


1717 1718 1719 1720 1721
class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1722 1723 1724
class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
W
wanghuancoder 已提交
1725
        self.python_api = paddle.acosh
X
xiaoting 已提交
1726
        self.init_dtype()
1727
        self.init_shape()
X
xiaoting 已提交
1728 1729

        np.random.seed(1024)
1730
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
X
xiaoting 已提交
1731 1732 1733 1734
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1735
        self.convert_input_output()
X
xiaoting 已提交
1736

1737 1738 1739
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1740 1741 1742 1743 1744 1745
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1746 1747 1748 1749 1750
class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1751 1752 1753
class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
W
wanghuancoder 已提交
1754
        self.python_api = paddle.asinh
X
xiaoting 已提交
1755
        self.init_dtype()
1756
        self.init_shape()
X
xiaoting 已提交
1757 1758

        np.random.seed(1024)
1759
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
X
xiaoting 已提交
1760 1761 1762 1763
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1764
        self.convert_input_output()
X
xiaoting 已提交
1765

1766 1767 1768
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1769 1770 1771 1772 1773 1774
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1775 1776 1777 1778 1779
class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1780 1781 1782
class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
W
wanghuancoder 已提交
1783
        self.python_api = paddle.atanh
X
xiaoting 已提交
1784
        self.init_dtype()
1785
        self.init_shape()
X
xiaoting 已提交
1786 1787

        np.random.seed(400)
1788
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
X
xiaoting 已提交
1789 1790 1791 1792
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1793
        self.convert_input_output()
X
xiaoting 已提交
1794

1795 1796 1797
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1798 1799 1800 1801 1802 1803
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1804 1805 1806 1807 1808
class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1809
class TestRound(TestActivation):
D
dzhwinter 已提交
1810 1811
    def setUp(self):
        self.op_type = "round"
1812
        self.python_api = paddle.round
1813
        self.init_dtype()
1814
        self.init_shape()
1815

1816
        np.random.seed(1024)
1817
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1818 1819 1820 1821
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1822
        self.convert_input_output()
D
dzhwinter 已提交
1823

1824 1825 1826
    def init_shape(self):
        self.shape = [10, 12]

C
chengduo 已提交
1827
    def test_check_grad(self):
1828 1829 1830
        pass


1831 1832 1833 1834 1835
class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1836
class TestRelu(TestActivation):
1837
    def setUp(self):
Q
qijun 已提交
1838
        self.op_type = "relu"
K
Kang Zhao 已提交
1839 1840
        self.python_api = paddle.nn.functional.relu
        self.prim_op_type = "comp"
1841
        self.public_python_api = paddle.nn.functional.relu
K
Kexin Zhao 已提交
1842
        self.init_dtype()
1843
        self.init_shape()
1844
        self.if_enable_cinn()
K
Kexin Zhao 已提交
1845

1846
        np.random.seed(1024)
1847 1848 1849 1850 1851
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)
        self.inputs = {'X': x}
K
Kexin Zhao 已提交
1852 1853

        self.outputs = {'Out': out}
1854
        self.convert_input_output()
1855 1856

    def test_check_grad(self):
K
Kexin Zhao 已提交
1857 1858
        if self.dtype == np.float16:
            return
K
Kang Zhao 已提交
1859 1860 1861 1862 1863
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output(check_prim=True)

1864 1865
    def if_enable_cinn(self):
        pass
A
Adam 已提交
1866 1867


1868 1869 1870 1871 1872
class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


1873 1874 1875
class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
1876
        np.random.seed(1024)
1877
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
1878 1879 1880
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1881
            else paddle.CPUPlace()
1882
        )
1883 1884 1885 1886
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu
1887 1888

    def test_static_api(self):
1889
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1890
            with paddle.static.program_guard(paddle.static.Program()):
1891
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
1892 1893 1894 1895 1896 1897 1898 1899
                out1 = self.relu(x)
                m = paddle.nn.ReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.maximum(self.x_np, 0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1900 1901

    def test_dygraph_api(self):
1902 1903 1904 1905 1906 1907 1908 1909
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            m = paddle.nn.ReLU()
            out1 = m(x)
            out2 = self.relu(x)
            out_ref = np.maximum(self.x_np, 0)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.relu, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):
1942 1943 1944
    def get_alpha(self):
        return 0.02

A
Adam 已提交
1945 1946
    def setUp(self):
        self.op_type = "leaky_relu"
W
wanghuancoder 已提交
1947
        self.python_api = paddle.nn.functional.leaky_relu
1948 1949
        self.public_python_api = paddle.nn.functional.leaky_relu
        self.prim_op_type = "comp"
A
Adam 已提交
1950
        self.init_dtype()
1951
        self.init_shape()
1952
        self.if_enable_cinn()
1953
        alpha = self.get_alpha()
A
Adam 已提交
1954

1955
        np.random.seed(1024)
1956
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
A
Adam 已提交
1957
        # The same reason with TestAbs
1958 1959
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)
A
Adam 已提交
1960

1961
        self.inputs = {'X': x}
A
Adam 已提交
1962
        self.outputs = {'Out': out}
1963
        self.attrs = {'alpha': alpha}
1964
        self.convert_input_output()
A
Adam 已提交
1965

1966 1967 1968
    def if_enable_cinn(self):
        pass

1969 1970 1971
    def test_check_output(self):
        self.check_output(check_prim=True)

A
Adam 已提交
1972 1973 1974
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1975
        self.check_grad(['X'], 'Out', check_prim=True)
1976 1977


1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992
class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


1993 1994 1995 1996
class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []

1997
    def if_enable_cinn(self):
1998 1999
        self.enable_cinn = False

2000

2001 2002 2003
class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
2004
        np.random.seed(1024)
2005
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
2006 2007 2008
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2009
            else paddle.CPUPlace()
2010
        )
2011 2012

    def test_static_api(self):
2013
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2014
            with paddle.static.program_guard(paddle.static.Program()):
2015
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
2016 2017 2018 2019 2020 2021 2022 2023
                out1 = F.leaky_relu(x)
                m = paddle.nn.LeakyReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_leaky_relu(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2024 2025

    def test_dygraph_api(self):
2026 2027 2028 2029 2030 2031 2032 2033
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            out_ref = ref_leaky_relu(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2034

2035 2036 2037 2038 2039 2040
            out1 = F.leaky_relu(x, 0.6)
            m = paddle.nn.LeakyReLU(0.6)
            out2 = m(x)
            out_ref = ref_leaky_relu(self.x_np, 0.6)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2041

2042
    def test_errors(self):
2043
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2044 2045 2046 2047
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.leaky_relu, 1)
                # The input dtype must be float16, float32, float64.
2048
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2049 2050 2051 2052
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.leaky_relu, x_int32)
                # support the input dtype is float16
2053
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2054 2055 2056
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
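

# Illustrative sketch (not part of the test suite): the two branches of the
# reference above differ only in how the Gaussian CDF is written, a tanh-based
# approximation versus the exact erf form; on a small grid the two stay within
# roughly 1e-3 of each other.
def _gelu_approx_vs_exact_sketch():
    x = np.linspace(-3.0, 3.0, 61).astype('float64')
    return np.max(np.abs(gelu(x, True) - gelu(x, False)))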


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
2077 2078
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
2079
        self.public_python_api = paddle.nn.functional.gelu
C
Clementine 已提交
2080
        self.init_dtype()
2081
        self.init_shape()
2082
        approximate = True
2083
        np.random.seed(1024)
2084
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
2085
        out = gelu(x, approximate)
C
Clementine 已提交
2086

2087
        self.inputs = {'X': x}
2088 2089 2090
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

2091 2092 2093 2094
        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu device, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
2095 2096 2097
        # Cumulative error occurs between comp and cinn, so that we also set cinn_rtol to 1e-8 as rev_comp_rtol = 1e-8
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8
C
cxxly 已提交
2098

2099 2100 2101
    def test_check_output(self):
        self.check_output(check_prim=True)

2102 2103 2104
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2105
        self.check_grad(['X'], 'Out', check_prim=True)
2106 2107 2108 2109 2110


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
2111 2112
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
2113
        self.public_python_api = paddle.nn.functional.gelu
2114
        self.init_dtype()
2115
        self.init_shape()
2116
        approximate = False
2117
        np.random.seed(2048)
2118
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
2119
        out = gelu(x, approximate)
2120
        self.if_enable_cinn()
C
Clementine 已提交
2121

2122
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
C
Clementine 已提交
2123
        self.outputs = {'Out': out}
2124
        self.convert_input_output()
2125
        self.attrs = {"approximate": approximate}
2126 2127 2128 2129
        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
2130 2131 2132
        # Cumulative error occurs between comp and cinn, so that we also set cinn_rtol to 1e-8 as rev_comp_rtol = 1e-8
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8
C
Clementine 已提交
2133

2134
    def if_enable_cinn(self):
2135
        pass
2136 2137 2138 2139

    def test_check_output(self):
        self.check_output(check_prim=True)

C
Clementine 已提交
2140 2141 2142
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2143
        self.check_grad(['X'], 'Out', check_prim=True)
C
Clementine 已提交
2144 2145


2146 2147 2148 2149 2150
class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


2151 2152 2153
class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
2154
        np.random.seed(1024)
2155
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
2156 2157 2158
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2159
            else paddle.CPUPlace()
2160
        )
C
cxxly 已提交
2161 2162
        self.enable_cinn = False

2163 2164 2165 2166
        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
2167 2168

    def test_static_api(self):
2169
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2170
            with paddle.static.program_guard(paddle.static.Program()):
2171
                x = paddle.static.data('X', [11, 17], dtype="float32")
W
wanghuancoder 已提交
2172 2173 2174 2175 2176 2177 2178 2179
                out1 = F.gelu(x)
                m = paddle.nn.GELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = gelu(self.x_np, False)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2180 2181

    def test_dygraph_api(self):
2182 2183 2184 2185 2186 2187 2188 2189
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            out_ref = gelu(self.x_np, False)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2190

2191 2192 2193 2194 2195 2196
            out1 = F.gelu(x, True)
            m = paddle.nn.GELU(True)
            out2 = m(x)
            out_ref = gelu(self.x_np, True)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2197 2198

    def test_errors(self):
2199
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2200 2201 2202 2203
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.gelu, 1)
                # The input dtype must be float16, float32, float64.
2204
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2205 2206 2207 2208
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.gelu, x_int32)
                # support the input dtype is float16
2209
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2210 2211 2212
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.gelu(x_fp16)
2213 2214


C
chengduo 已提交
2215
class TestBRelu(TestActivation):
2216 2217
    def setUp(self):
        self.op_type = "brelu"
W
wanghuancoder 已提交
2218
        self.python_api = paddle.nn.functional.hardtanh
2219 2220
        self.init_dtype()

2221
        np.random.seed(1024)
Z
zhupengyang 已提交
2222
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
Y
Yang Yang(Tony) 已提交
2223 2224
        t_min = 1.0
        t_max = 4.0
Q
qijun 已提交
2225 2226
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
Q
qijun 已提交
2227
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
2228 2229 2230
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
2231 2232

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
F
fengjiayi 已提交
2233
        self.outputs = {'Out': t}
2234 2235
        self.convert_input_output()
        self.attrs = {'t_min': t_min, 't_max': t_max}
2236 2237

    def test_check_grad(self):
2238 2239
        if self.dtype == np.float16:
            return
2240
        self.check_grad(['X'], 'Out')


def ref_relu6(x, threshold=6.0):
    # reference: clip the input to [0, threshold]
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
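

# Illustrative sketch (not part of the test suite): relu6 is relu clipped at
# the threshold, so with the default threshold of 6 a few hand-checked points
# are relu6(-1) = 0, relu6(3) = 3 and relu6(8) = 6.
def _relu6_examples_sketch():
    return ref_relu6(np.array([-1.0, 3.0, 8.0]))  # approx [0.0, 3.0, 6.0]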


class TestRelu6(TestActivation):
K
Kavya Srinet 已提交
2251
    def setUp(self):
2252
        self.op_type = "relu6"
2253
        self.init_dtype()
2254
        self.init_shape()
2255
        self.python_api = paddle.nn.functional.relu6
2256

2257
        np.random.seed(1024)
2258
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
2259
        x[np.abs(x) < 0.005] = 0.02
2260
        out = ref_relu6(x)
2261

2262
        self.attrs = {'threshold': 6.0}
2263 2264

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
2265
        self.outputs = {'Out': out}
2266
        self.convert_input_output()
K
Kavya Srinet 已提交
2267

2268 2269 2270
    def init_shape(self):
        self.shape = [10, 12]

2271 2272 2273
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2274
        self.check_grad(['X'], 'Out')
2275 2276


2277 2278 2279 2280 2281
class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


2282 2283 2284
class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
2285
        np.random.seed(1024)
2286 2287
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
2288 2289 2290
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2291
            else paddle.CPUPlace()
2292
        )
2293 2294

    def test_static_api(self):
2295
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2296
            with paddle.static.program_guard(paddle.static.Program()):
2297
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2298 2299 2300 2301 2302 2303 2304 2305
                out1 = F.relu6(x)
                relu6 = paddle.nn.ReLU6()
                out2 = relu6(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_relu6(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2306 2307

    def test_dygraph_api(self):
2308 2309 2310 2311 2312 2313 2314 2315
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            out_ref = ref_relu6(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2316 2317

    def test_fluid_api(self):
2318
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2319
            with fluid.program_guard(fluid.Program()):
2320
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2321 2322 2323 2324 2325
                out = paddle.nn.functional.relu6(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_relu6(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2326

2327
    def test_errors(self):
2328
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2329 2330 2331 2332
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.relu6, 1)
                # The input dtype must be float16, float32, float64.
2333
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2334 2335 2336 2337
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.relu6, x_int32)
                # support the input dtype is float16
2338
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2339 2340 2341
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.relu6(x_fp16)
2342 2343


2344 2345
class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
2346
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368
            with warnings.catch_warnings(record=True) as context:
                warnings.simplefilter("always")

                helper = LayerHelper("relu6")
                data = paddle.static.data(
                    name='data', shape=[None, 3, 32, 32], dtype='float32'
                )
                out = helper.create_variable_for_type_inference(
                    dtype=data.dtype
                )
                os.environ['FLAGS_print_extra_attrs'] = "1"
                helper.append_op(
                    type="relu6",
                    inputs={'X': data},
                    outputs={'Out': out},
                    attrs={'threshold': 6.0},
                )
                self.assertTrue(
                    "op relu6 use extra_attr: threshold"
                    in str(context[-1].message)
                )
                os.environ['FLAGS_print_extra_attrs'] = "0"


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    # compute in float32 for float16 inputs, then cast the result back
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
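

# Illustrative sketch (not part of the test suite): hard swish is
# x * clip(x + offset, 0, threshold) / scale, so with the defaults
# threshold=6, scale=6, offset=3 some hand-checked points are
# hardswish(-4) = 0 (the shifted input clips to 0), hardswish(1) = 1 * 4 / 6
# and hardswish(4) = 4 * 6 / 6 = 4.
def _hardswish_examples_sketch():
    pts = np.array([-4.0, 0.0, 1.0, 4.0])
    return ref_hardswish(pts)  # approx [0.0, 0.0, 0.6667, 4.0]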


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
2385
        self.init_shape()
R
Roc 已提交
2386
        self.prim_op_type = "comp"
2387
        self.python_api = paddle.nn.functional.hardswish
2388
        self.public_python_api = paddle.nn.functional.hardswish
J
jakpiase 已提交
2389

2390
        np.random.seed(1024)
2391
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
H
huangjun12 已提交
2392 2393 2394
        threshold = 6.0
        scale = 6.0
        offset = 3.0
2395
        # the same with TestAbs
H
huangjun12 已提交
2396 2397
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
2398
        out = ref_hardswish(x, threshold, scale, offset)
H
huangjun12 已提交
2399

2400
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
H
huangjun12 已提交
2401
        self.outputs = {'Out': out}
2402 2403
        self.convert_input_output()
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
H
huangjun12 已提交
2404

2405 2406 2407
    def init_shape(self):
        self.shape = [10, 12]

2408 2409 2410
    def if_only_check_prim(self):
        return False

H
huangjun12 已提交
2411
    def test_check_grad(self):
2412 2413 2414 2415 2416 2417
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=self.if_only_check_prim(),
        )
2418 2419

    def test_check_output(self):
W
wanghuancoder 已提交
2420
        self.check_output(check_prim=True)
H
huangjun12 已提交
2421 2422


2423
class TestHardSwish_ZeroDim(TestHardSwish):
R
Roc 已提交
2424 2425 2426 2427
    def init_shape(self):
        self.shape = []


2428 2429 2430 2431
class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
2432 2433 2434
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2435
            else paddle.CPUPlace()
2436
        )
2437 2438

    def test_static_api(self):
2439
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2440
            with paddle.static.program_guard(paddle.static.Program()):
2441
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2442 2443 2444 2445 2446 2447 2448 2449
                out1 = F.hardswish(x)
                m = paddle.nn.Hardswish()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardswish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2450 2451

    def test_dygraph_api(self):
2452 2453 2454 2455 2456 2457 2458 2459
        with dynamic_guad():
            x = paddle.to_tensor([11648.0, 11448.0])
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            out_ref = [11648.0, 11448.0]
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2460 2461

    def test_fluid_api(self):
2462
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2463
            with fluid.program_guard(fluid.Program()):
2464
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2465 2466 2467 2468 2469 2470
                out = paddle.nn.functional.hardswish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardswish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

2471 2472 2473 2474
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out = paddle.nn.functional.hardswish(x)
            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
2475 2476

    def test_errors(self):
2477
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2478 2479 2480 2481
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardswish, 1)
                # The input dtype must be float16, float32, float64.
2482
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2483 2484 2485 2486
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardswish, x_int32)
                # support the input dtype is float16
2487
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2488 2489 2490
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardswish(x_fp16)
2491 2492


C
chengduo 已提交
2493
class TestSoftRelu(TestActivation):
2494 2495
    def setUp(self):
        self.op_type = "soft_relu"
2496 2497
        self.init_dtype()

2498
        np.random.seed(4096)
2499
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
Y
Yang Yang(Tony) 已提交
2500
        threshold = 2.0
Q
qijun 已提交
2501 2502
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
Z
zhupengyang 已提交
2503
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
2504 2505 2506
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
2507
        out = np.log(np.exp(t) + 1)
2508 2509 2510

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2511 2512
        self.convert_input_output()
        self.attrs = {'threshold': threshold}
2513

2514 2515 2516
    def test_check_output(self):
        self.check_output(check_dygraph=False)

2517
    def test_check_grad(self):
2518 2519
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2520 2521 2522
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.02, check_dygraph=False
        )


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
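

# Illustrative sketch (not part of the test suite): with alpha = 1 the two ELU
# branches meet at 0 with matching value and slope, which is why the ELU tests
# below do not need to push samples away from 0 the way TestAbs does.
def _elu_smooth_at_zero_sketch(eps=1e-4):
    left = (elu(np.array([0.0]), 1.0) - elu(np.array([-eps]), 1.0)) / eps
    right = (elu(np.array([eps]), 1.0) - elu(np.array([0.0]), 1.0)) / eps
    return left.item(), right.item()  # both approach 1.0 as eps -> 0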


class TestELU(TestActivation):
2531 2532
    def setUp(self):
        self.op_type = "elu"
2533
        self.init_dtype()
2534
        self.init_shape()
W
wanghuancoder 已提交
2535
        self.python_api = paddle.nn.functional.elu
2536

2537
        np.random.seed(1024)
2538
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
Z
zhupengyang 已提交
2539
        alpha = self.get_alpha()
2540
        out = elu(x, alpha)
2541 2542
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
2543 2544

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
2545
        self.outputs = {'Out': out}
2546 2547
        self.convert_input_output()
        self.attrs = {'alpha': alpha}
2548

2549 2550 2551
    def init_shape(self):
        self.shape = [10, 12]

2552
    def test_check_grad(self):
2553 2554
        if self.dtype == np.float16:
            return
2555
        self.check_grad(['X'], 'Out')
2556

Z
zhupengyang 已提交
2557
    def get_alpha(self):
2558
        return 1.0
Z
zhupengyang 已提交
2559 2560 2561 2562 2563 2564


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2

2565

2566 2567 2568 2569 2570
class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


2571 2572 2573
class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
2574
        np.random.seed(1024)
2575
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
2576 2577 2578
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2579
            else paddle.CPUPlace()
2580
        )
2581 2582 2583 2584
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu
2585 2586

    def test_static_api(self):
2587
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2588
            with paddle.static.program_guard(paddle.static.Program()):
2589
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
2590 2591 2592 2593 2594 2595 2596 2597
                out1 = self.elu(x)
                m = paddle.nn.ELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = elu(self.x_np, 1.0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2598 2599

    def test_dygraph_api(self):
2600 2601 2602 2603 2604 2605 2606 2607 2608
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = self.elu(x)
            x = paddle.to_tensor(self.x_np)
            m = paddle.nn.ELU()
            out2 = m(x)
            out_ref = elu(self.x_np, 1.0)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2609

2610 2611 2612 2613 2614 2615 2616
            out1 = self.elu(x, 0.2)
            x = paddle.to_tensor(self.x_np)
            m = paddle.nn.ELU(0.2)
            out2 = m(x)
            out_ref = elu(self.x_np, 0.2)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2617

2618
    def test_errors(self):
2619
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2620 2621 2622 2623
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.elu, 1)
                # The input dtype must be float16, float32, float64.
2624
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2625 2626 2627 2628
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.elu, x_int32)
                # support the input dtype is float16
2629
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2630 2631 2632
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.elu(x_fp16)
2633 2634


Z
zhupengyang 已提交
2635 2636 2637 2638 2639 2640
class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
2641 2642 2643
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            self.assertRaises(Exception, F.elu_, x, -0.2)


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
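

# Illustrative sketch (not part of the test suite): CELU generalises ELU by
# rescaling the negative branch, and the two coincide exactly when alpha = 1.
def _celu_matches_elu_sketch():
    x = np.linspace(-3.0, 3.0, 13).astype('float32')
    return np.max(np.abs(celu(x, 1.0) - elu(x, 1.0)))  # expected to be ~0.0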


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
2655
        self.init_shape()
2656

2657
        self.python_api = paddle.nn.functional.celu
2658
        np.random.seed(1024)
2659
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
2660 2661
        alpha = 1.5
        out = celu(x, alpha)
2662 2663

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
2664
        self.outputs = {'Out': out}
2665 2666
        self.convert_input_output()
        self.attrs = {'alpha': alpha}
2667

2668 2669 2670
    def init_shape(self):
        self.shape = [10, 12]

2671 2672 2673
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2674
        self.check_grad(['X'], 'Out')
2675 2676


2677 2678 2679 2680 2681
class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


2682 2683 2684 2685 2686
class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
2687 2688 2689
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2690
            else paddle.CPUPlace()
2691
        )
2692 2693 2694 2695 2696 2697
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
2698
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2699
            with paddle.static.program_guard(paddle.static.Program()):
2700
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
2701 2702 2703 2704 2705 2706 2707 2708
                out1 = self.celu(x, 1.5)
                m = paddle.nn.CELU(1.5)
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = celu(self.x_np, 1.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2709 2710

    def test_dygraph_api(self):
2711 2712 2713 2714 2715 2716 2717 2718 2719
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = self.celu(x, 1.5)
            x = paddle.to_tensor(self.x_np)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            out_ref = celu(self.x_np, 1.5)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2720

2721 2722 2723 2724 2725 2726 2727
            out1 = self.celu(x, 0.2)
            x = paddle.to_tensor(self.x_np)
            m = paddle.nn.CELU(0.2)
            out2 = m(x)
            out_ref = celu(self.x_np, 0.2)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2728 2729

    def test_errors(self):
2730
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2731 2732 2733 2734
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.celu, 1)
                # The input dtype must be float16, float32, float64.
2735
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2736 2737 2738 2739
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.celu, x_int32)
                # The alpha must be not equal 0
2740
                x_fp32 = paddle.static.data(
W
wanghuancoder 已提交
2741 2742 2743 2744
                    name='x_fp32', shape=[10, 12], dtype='float32'
                )
                self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
                # support the input dtype is float16
2745
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2746 2747 2748
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.celu(x_fp16)
2749 2750


C
chengduo 已提交
2751
class TestReciprocal(TestActivation):
Q
qijun 已提交
2752 2753
    def setUp(self):
        self.op_type = "reciprocal"
2754
        self.python_api = paddle.reciprocal
2755
        self.init_dtype()
2756
        self.init_shape()
2757

2758
        np.random.seed(1024)
2759
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
2760 2761 2762 2763
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2764
        self.convert_input_output()
Q
qijun 已提交
2765 2766

    def test_check_grad(self):
2767 2768
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2769
        self.check_grad(['X'], 'Out', max_relative_error=0.01)
2770 2771

    def test_check_output(self):
W
wanghuancoder 已提交
2772
        self.check_output()
Q
qijun 已提交
2773 2774


2775 2776 2777 2778 2779
class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
2780
class TestLog(TestActivation):
Q
qijun 已提交
2781 2782
    def setUp(self):
        self.op_type = "log"
2783
        self.prim_op_type = "prim"
2784
        self.python_api = paddle.log
2785
        self.public_python_api = paddle.log
2786
        self.init_dtype()
2787
        self.init_shape()
2788
        self.if_enable_cinn()
2789

2790
        np.random.seed(1024)
2791
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2792 2793 2794 2795
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2796
        self.convert_input_output()
Q
qijun 已提交
2797

2798 2799 2800
    def if_enable_cinn(self):
        pass

Q
qijun 已提交
2801
    def test_check_grad(self):
2802 2803
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2804
        self.check_grad(['X'], 'Out', check_prim=True)
Q
qijun 已提交
2805

2806

2807 2808
class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
2809
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2810 2811 2812 2813 2814 2815 2816 2817 2818 2819
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])
2820

2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845
    def test_api_bf16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='bfloat16')
                out = paddle.log(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class Test_Log_Op_Int(unittest.TestCase):
    def test_api_int(self):
        paddle.disable_static()
        for dtype in ('int32', 'int64', 'float16'):
            np_x = np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype)
            x = paddle.to_tensor(np_x, dtype=dtype)
            y = paddle.log(x)
            x_expect = np.log(np_x)
            np.testing.assert_allclose(y.numpy(), x_expect, rtol=1e-3)
        paddle.enable_static()
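

# Illustrative sketch (not part of the test suite): the log2 and log10 kernels
# exercised below agree with rescaling the natural log, since
# log_b(x) = ln(x) / ln(b).
def _log_change_of_base_sketch():
    x = np.random.uniform(0.1, 1.0, [4]).astype('float64')
    err2 = np.max(np.abs(np.log2(x) - np.log(x) / np.log(2.0)))
    err10 = np.max(np.abs(np.log10(x) - np.log(x) / np.log(10.0)))
    return err2, err10  # both expected at the level of float64 rounding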


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
2852 2853 2854
class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
2855
        self.python_api = paddle.log2
J
joejiong 已提交
2856
        self.init_dtype()
2857
        self.init_shape()
J
joejiong 已提交
2858

2859
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2860 2861 2862 2863
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2864
        self.convert_input_output()
J
joejiong 已提交
2865 2866 2867 2868

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2869
        self.check_grad(['X'], 'Out')
J
joejiong 已提交
2870 2871

    def test_api(self):
2872
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log2(data_x)
                exe = paddle.static.Executor(place=fluid.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log2(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2891 2892 2893 2894 2895 2896 2897 2898

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
2899
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2900 2901


2902 2903 2904 2905 2906
class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931
class TestLog2_Op_Int(unittest.TestCase):
    def test_api_int(self):
        paddle.disable_static()
        for dtype in ['int32', 'int64', 'float16']:
            np_x = np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype)
            x = paddle.to_tensor(np_x, dtype=dtype)
            y = paddle.log2(x)
            x_expect = np.log2(np_x)
            np.testing.assert_allclose(y.numpy(), x_expect, rtol=1e-3)
        paddle.enable_static()

    def test_api_bf16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='bfloat16')
                out = paddle.log2(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


J
joejiong 已提交
2932 2933 2934
class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10_Op_Int(unittest.TestCase):
    def test_api_int(self):
        paddle.disable_static()
        for dtype in ['int32', 'int64', 'float16']:
            np_x = np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype)
            x = paddle.to_tensor(np_x, dtype=dtype)
            y = paddle.log10(x)
            x_expect = np.log10(np_x)
            np.testing.assert_allclose(y.numpy(), x_expect, rtol=1e-3)
        paddle.enable_static()

    def test_api_bf16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='bfloat16')
                out = paddle.log10(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog10API(unittest.TestCase):
    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log10(data_x)
                exe = paddle.static.Executor(place=paddle.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log10(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log1p(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog1p_Op_Int(unittest.TestCase):
    def test_api_int(self):
        paddle.disable_static()
        for dtype in ['int32', 'int64', 'float16']:
            np_x = np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype)
            x = paddle.to_tensor(np_x, dtype=dtype)
            y = paddle.log1p(x)
            x_expect = np.log1p(np_x)
            np.testing.assert_allclose(y.numpy(), x_expect, rtol=1e-3)
        paddle.enable_static()

    def test_api_bf16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='bfloat16')
                out = paddle.log1p(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x",
                    shape=[11, 17],
                    dtype="float64",
                )

                out1 = paddle.log1p(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (res1,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log1p(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def test_check_output(self):
        self.check_output()


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.prim_op_type = "comp"
        self.python_api = paddle.pow
        self.public_python_api = paddle.pow
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'factor': 3.0}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.enable_cinn = False
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype(self.dtype),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            input = np.random.uniform(1, 2, [11, 17]).astype("float32")
            x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
            res = paddle.static.data(
                name="res", shape=[11, 17], dtype="float32"
            )

            factor_1 = 2.0
            factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0)
            out_1 = paddle.pow(x, factor_1)
            out_2 = paddle.pow(x, factor_2)
            out_4 = paddle.pow(x, factor_1, name='pow_res')
            out_6 = paddle.pow(x, factor_2)
            self.assertEqual(('pow_res' in out_4.name), True)

            exe = fluid.Executor(place=fluid.CPUPlace())
            res_1, res_2, res, res_6 = exe.run(
                fluid.default_main_program(),
                feed={"x": input},
                fetch_list=[out_1, out_2, res, out_6],
            )

            assert np.allclose(res_1, np.power(input, 2))
            assert np.allclose(res_2, np.power(input, 3))
            assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


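# Illustrative sanity check of the reference above (not part of the original
# suite): stanh(x) = scale_b * tanh(scale_a * x), so it is odd, passes through
# the origin, and reduces to plain tanh when both scales are 1.
class TestSTanhRefExample(unittest.TestCase):
    def test_hand_computed_points(self):
        np.testing.assert_allclose(ref_stanh(np.array([0.0])), [0.0])
        x = np.linspace(-2.0, 2.0, 5)
        np.testing.assert_allclose(ref_stanh(x, 1.0, 1.0), np.tanh(x), rtol=1e-12)

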
class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.python_api = paddle.stanh
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12])
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            for r in [out]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.stanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.stanh, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


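# Illustrative sanity check of ref_softplus (not part of the original suite):
# with the default beta=1 the value at 0 is log(2), and once beta * x exceeds
# the threshold the function switches to the linear branch and returns x.
class TestSoftplusRefExample(unittest.TestCase):
    def test_hand_computed_points(self):
        np.testing.assert_allclose(
            ref_softplus(np.array([0.0])), [np.log(2.0)], rtol=1e-12
        )
        np.testing.assert_allclose(
            ref_softplus(np.array([25.0]), beta=1, threshold=20),
            [25.0],
            rtol=1e-12,
        )

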
class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.python_api = paddle.nn.functional.softplus

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softplus(x, self.beta, self.threshold)
                softplus = paddle.nn.Softplus(self.beta, self.threshold)
                out2 = softplus(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softplus, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softplus, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softplus(x_fp16)


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


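# Illustrative sanity check of ref_softsign (not part of the original suite):
# softsign(x) = x / (1 + |x|), so a few points can be verified by hand and the
# output always stays inside the open interval (-1, 1).
class TestSoftsignRefExample(unittest.TestCase):
    def test_hand_computed_points(self):
        x = np.array([-3.0, 0.0, 1.0])
        np.testing.assert_allclose(ref_softsign(x), [-0.75, 0.0, 0.5], rtol=1e-12)
        y = ref_softsign(np.linspace(-50.0, 50.0, 11))
        self.assertTrue(np.all(np.abs(y) < 1.0))

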
class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softsign(x)
                softsign = paddle.nn.Softsign()
                out2 = softsign(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softsign(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            out_ref = ref_softsign(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softsign, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softsign, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


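# Illustrative sanity check of ref_thresholded_relu (not part of the original
# suite): values strictly above the threshold pass through unchanged, and
# everything else (including negatives) is mapped to 0.
class TestThresholdedReluRefExample(unittest.TestCase):
    def test_hand_computed_points(self):
        x = np.array([-3.0, 0.5, 2.0])
        np.testing.assert_allclose(ref_thresholded_relu(x), [0.0, 0.0, 2.0])
        np.testing.assert_allclose(
            ref_thresholded_relu(np.array([10.0, 20.0]), threshold=15),
            [0.0, 20.0],
        )

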
class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.thresholded_relu

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"threshold": threshold}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.thresholded_relu(x, self.threshold)
                thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
                out2 = thresholded_relu(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.thresholded_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.thresholded_relu, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


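# Illustrative sanity check of ref_hardsigmoid (not part of the original
# suite): with the default slope of 1/6 and offset of 0.5 the ramp crosses
# 0.5 at x = 0 and saturates to 0 and 1 away from the origin.
class TestHardSigmoidRefExample(unittest.TestCase):
    def test_hand_computed_points(self):
        x = np.array([-10.0, 0.0, 10.0])
        np.testing.assert_allclose(ref_hardsigmoid(x), [0.0, 0.5, 1.0])

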
class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardsigmoid

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardsigmoid(x)
                m = paddle.nn.Hardsigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardsigmoid(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            out_ref = ref_hardsigmoid(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardsigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardsigmoid, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardsigmoid(x_fp16)


def ref_swish(x):
    out = x * expit(x)
    return out


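# Illustrative sanity check of ref_swish (not part of the original suite):
# swish(x) = x * sigmoid(x), so it vanishes at 0 and approaches the identity
# for large positive inputs where sigmoid(x) is effectively 1.
class TestSwishRefExample(unittest.TestCase):
    def test_hand_computed_points(self):
        np.testing.assert_allclose(ref_swish(np.array([0.0])), [0.0])
        np.testing.assert_allclose(ref_swish(np.array([20.0])), [20.0], rtol=1e-6)

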
class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'beta': 1.0}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.swish(x)
                swish = paddle.nn.Swish()
                out2 = swish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_swish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            out_ref = ref_swish(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.swish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_swish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.swish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.swish, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


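# Illustrative sanity check of ref_mish (not part of the original suite):
# mish(x) = x * tanh(softplus(x)), so it vanishes at 0 and, past the threshold
# where softplus is replaced by x itself, behaves like the identity.
class TestMishRefExample(unittest.TestCase):
    def test_hand_computed_points(self):
        np.testing.assert_allclose(ref_mish(np.array([0.0])), [0.0])
        np.testing.assert_allclose(ref_mish(np.array([25.0])), [25.0], rtol=1e-6)

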
class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.mish(x)
                mish = paddle.nn.Mish()
                out2 = mish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_mish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        with dynamic_guad():
            x = paddle.to_tensor(self.x_np)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            out_ref = ref_mish(self.x_np)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.mish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_mish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.mish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.mish, x_int32)
                # The input dtype float16 is supported.
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.mish(x_fp16)


# ------------------ Test Cudnn Activation ----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{}_{}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


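# Note on the factories in this section: each create_test_act_*_class call
# builds a subclass of the given parent and registers it in the module globals
# under a derived name (e.g. TestRelu becomes "TestRelu_cudnn" above,
# "TestRelu_FP16OP" for the fp16 variants and "TestRelu_BF16OP" for bf16), so
# unittest discovers the generated cases like any hand-written class.

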
# ------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(
    parent,
    atol=1e-3,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(
                    place,
                    atol=atol,
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                    max_relative_error=grad_atol,
                )

    cls_name = "{}_{}".format(parent.__name__, "FP16OP")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpFp32_Prim, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSilu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSqrtComp, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestAbs, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(
    TestFloor, check_prim=True, grad_check=False, enable_cinn=True
)
create_test_act_fp16_class(TestCos)
create_test_act_fp16_class(TestTan)
create_test_act_fp16_class(TestCosh)
create_test_act_fp16_class(TestAcos)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh)
create_test_act_fp16_class(TestAsinh)
create_test_act_fp16_class(TestAtanh)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestGelu,
    check_prim=True,
    enable_cinn=True,
    rev_comp_rtol=1e-3,
    rev_comp_atol=1e-3,
    cinn_rtol=1e-3,
    cinn_atol=1e-3,
)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, check_dygraph=False)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2)
else:
    create_test_act_fp16_class(TestLog2)
create_test_act_fp16_class(TestLog10)
create_test_act_fp16_class(TestLog1p)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, check_prim=True)
create_test_act_fp16_class(TestPow_factor_tensor)
create_test_act_fp16_class(TestSTanh)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish, check_prim=True)
create_test_act_fp16_class(TestMish)
create_test_act_fp16_class(TestLeakyRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestLeakyReluAlpha1, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha2, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha3, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_fp16_class(TestRsqrt, check_prim=True, enable_cinn=True)


def create_test_act_bf16_class(
    parent,
    atol=1e-2,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
    @unittest.skipIf(
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA or does not support bfloat16",
    )
    class TestActBF16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float32

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def convert_input_output(self):
            self.inputs = {'X': convert_float_to_uint16(self.inputs['X'])}
            self.outputs = {'Out': convert_float_to_uint16(self.outputs['Out'])}
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=atol, check_prim=check_prim
            )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    max_relative_error=grad_atol,
                    check_prim=check_prim,
                )

    cls_name = "{}_{}".format(parent.__name__, "BF16OP")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestActivation)
create_test_act_bf16_class(TestExpFp32_Prim, check_prim=True)
create_test_act_bf16_class(TestExpm1)
create_test_act_bf16_class(TestSigmoid, check_prim=True)
create_test_act_bf16_class(TestSilu, check_prim=True)
create_test_act_bf16_class(TestLogSigmoid)
create_test_act_bf16_class(TestTanh, check_prim=True)
create_test_act_bf16_class(TestTanhshrink)
create_test_act_bf16_class(TestHardShrink)
create_test_act_bf16_class(TestSoftshrink)
create_test_act_bf16_class(TestSqrt, check_prim=True)
create_test_act_bf16_class(TestSqrtComp, check_prim=True)
create_test_act_bf16_class(TestAbs, check_prim=True)
create_test_act_bf16_class(TestCeil, grad_check=False)
create_test_act_bf16_class(TestFloor, grad_check=False, check_prim=True)
create_test_act_bf16_class(TestCos)
create_test_act_bf16_class(TestTan)
create_test_act_bf16_class(TestCosh)
create_test_act_bf16_class(TestAcos)
create_test_act_bf16_class(TestSin)
create_test_act_bf16_class(TestSinh)
create_test_act_bf16_class(TestAsin)
create_test_act_bf16_class(TestAtan)
create_test_act_bf16_class(TestAcosh)
create_test_act_bf16_class(TestAsinh)
create_test_act_bf16_class(TestAtanh)
create_test_act_bf16_class(TestRound, grad_check=False)
create_test_act_bf16_class(TestRelu, check_prim=True)
create_test_act_bf16_class(
    TestGelu,
    check_prim=True,
    rev_comp_rtol=1e-2,
    rev_comp_atol=1e-2,
    cinn_rtol=1e-2,
    cinn_atol=1e-2,
)
create_test_act_bf16_class(TestBRelu)
create_test_act_bf16_class(TestRelu6)
create_test_act_bf16_class(TestSoftRelu, check_dygraph=False)
create_test_act_bf16_class(TestELU)
create_test_act_bf16_class(TestCELU)
create_test_act_bf16_class(TestReciprocal)
create_test_act_bf16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_bf16_class(TestLog2)
else:
    create_test_act_bf16_class(TestLog2)
create_test_act_bf16_class(TestLog10)
create_test_act_bf16_class(TestLog1p)
create_test_act_bf16_class(TestSquare)
create_test_act_bf16_class(TestPow, check_prim=True)
create_test_act_bf16_class(TestPow_factor_tensor)
create_test_act_bf16_class(TestSTanh)
create_test_act_bf16_class(TestSoftplus)
create_test_act_bf16_class(TestSoftsign)
create_test_act_bf16_class(TestThresholdedRelu)
create_test_act_bf16_class(TestHardSigmoid)
create_test_act_bf16_class(TestSwish)
create_test_act_bf16_class(TestHardSwish, check_prim=True)
create_test_act_bf16_class(TestMish)
create_test_act_bf16_class(TestLeakyRelu, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha1, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha2, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha3, check_prim=True)
create_test_act_bf16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_bf16_class(TestRsqrt, check_prim=True)

if __name__ == "__main__":
    unittest.main()