#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


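# TestActivation is the shared base case: it exercises the "exp" op and defines
# the init_dtype()/init_kernel_type() hooks. The per-op subclasses below
# typically override only setUp (op_type, inputs and the NumPy reference
# output), the dtype, or the gradient tolerance.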
class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass

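# TestParameter is mixed into selected tests (e.g. TestTanh, TestSqrt) to cover
# the Python API of an op: the out= and name= keyword arguments and dygraph
# execution. It assumes self.op_type names both a paddle.* function and a
# numpy function with matching semantics.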
class TestParameter(object):
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO: if dtype is float64, the output (Out) differs at CPUPlace
        # between the inplace and non-inplace code paths, so keep dtype as
        # float32 for now.
        self.dtype = np.float32

class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)

class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined: the op returns zero
    # as the gradient while numpy returns nan, so the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.leaky_relu, x_int32)
            # a float32 input (a supported dtype) should not raise
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float32')
            fluid.layers.leaky_relu(x_fp16)


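# NumPy reference implementation of GELU used by the tests below: the exact
# erf-based form and the tanh approximation, selected by `approximate`.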
def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # a float16 input (a supported dtype) should not raise
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.brelu(x_fp16)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        threshold = 6.0
        # The same with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


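# Same op as TestPow, but the exponent is passed through the FactorTensor input
# instead of the 'factor' attribute; test_api additionally covers the
# fluid.layers.pow and paddle.pow Python call paths.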
class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        import paddle
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        res = fluid.layers.data(
            name="res",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_3 = paddle.pow(x, factor_1, out=res)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, out_3, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_3, res)
        assert np.array_equal(res_6, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.delta = 0.005

        # Same reason as TestAbs
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


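# The swish reference below is x * sigmoid(beta * x), with the sigmoid taken
# from scipy.special.expit.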
class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
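# create_test_act_cudnn_class derives a cuDNN variant of an existing activation
# test: it sets use_cudnn=True through init_kernel_type, names the class
# "<Parent>_cudnn", registers it in the module globals so unittest discovers
# it, and skips it when Paddle is not compiled with CUDA.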
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
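# create_test_act_fp16_class registers a float16 variant of each activation
# test; output and gradient checks only run on a CUDA place that reports
# float16 support, with relaxed tolerances (atol / grad_atol).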
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()