#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


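# Base class for the activation op tests below: it checks the "exp" op and
# provides the init_dtype()/init_kernel_type() hooks that subclasses override.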
class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass

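# Mixin used by several tests below; it exercises the corresponding
# paddle.<op_type> Python API (out=/name= arguments and dygraph mode)
# via eval on the subclass's self.op_type.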
class TestParameter(object):
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        #TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)

class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined;
    # the op returns zero as the gradient, while numpy returns nan.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.relu(x_fp16)


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.leaky_relu, x_int32)
            # support the input dtype is float32
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float32')
            fluid.layers.leaky_relu(x_fp16)


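# NumPy reference for the gelu op: approximate=True uses the tanh
# approximation, approximate=False the exact erf formulation.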
def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.brelu(x_fp16)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        threshold = 6.0
        # The same with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        #the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


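# Same op as TestPow, but the exponent is fed through the FactorTensor input
# instead of the 'factor' attribute.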
class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        import paddle
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        res = fluid.layers.data(
            name="res",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_3 = paddle.pow(x, factor_1, out=res)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, out_3, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_3, res)
        assert np.array_equal(res_6, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.delta = 0.005

        # Same reason as TestAbs
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
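# Dynamically defines a cuDNN variant (use_cudnn=True) of the given test class
# and registers it in globals() as "<ParentName>_cudnn"; skipped without CUDA.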
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
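# Dynamically defines an FP16 variant of the given test class and registers it
# in globals() as "<ParentName>_fp16"; checks only run where float16 is supported.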
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()