#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit


class TestExp(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


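# The TestFP16* variants below all follow the same pattern: init_dtype()
# switches the test to float16, the base class skips the gradient check via
# the `if self.dtype == np.float16: return` guard, and only the forward
# output is verified on a CUDA place with a relaxed absolute tolerance.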
class TestFP16Exp(TestExp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSigmoid(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def init_dtype(self):
        pass


class TestFP16Sigmoid(TestSigmoid):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestLogSigmoid(OpTest):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))
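        # Note: log(1 / (1 + exp(-x))) == -log(1 + exp(-x)); the direct form
        # is numerically fine here because x is drawn from [-1, 1].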

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)

    def init_dtype(self):
        pass


class TestFP16LogSigmoid(TestLogSigmoid):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestTanh(OpTest):
    def setUp(self):
        self.op_type = "tanh"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Tanh(TestTanh):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestTanhShrink(OpTest):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)

    def init_dtype(self):
        pass


class TestFP16TanhShrink(TestTanhShrink):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestHardShrink(OpTest):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.dtype = np.float32
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0
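        # hard_shrink zeroes entries with |x| <= threshold (the 'lambda'
        # attribute) and keeps the remaining entries unchanged.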

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)

    def init_dtype(self):
        pass


class TestFP16HardShrink(TestHardShrink):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftShrink(OpTest):
    def setUp(self):
        self.op_type = "softshrink"
        self.dtype = np.float32
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)
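        # Equivalent piecewise form: softshrink(x) = x - lambda for
        # x > lambda, x + lambda for x < -lambda, and 0 otherwise.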

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16SoftShrink(TestSoftShrink):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSqrt(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Sqrt(TestSqrt):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestAbs(OpTest):
    def setUp(self):
        self.op_type = "abs"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because we set delta = 0.005 when calculating the numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # and x_pos will be 0.007, so the numeric gradient is inaccurate;
        # we should avoid such points.
        x[np.abs(x) < 0.005] = 0.02
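        # Illustrative arithmetic: with f(x) = |x|, x = 0.002 and delta =
        # 0.005, the central difference (|0.007| - |-0.003|) / (2 * 0.005)
        # gives 0.4 while the analytic gradient is 1.0, which is why such
        # points are shifted to 0.02 above.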
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Abs(TestAbs):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCeil(OpTest):
    def setUp(self):
        self.op_type = "ceil"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    # The gradient check is skipped for the same reason as TestFloor.

    def init_dtype(self):
        pass


class TestFP16Ceil(TestCeil):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestFloor(OpTest):
    def setUp(self):
        self.op_type = "floor"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    # The gradient of floor, ceil and round is undefined; the op returns
    # zero as the gradient, but numpy returns nan, so the gradient check
    # is skipped.

    def init_dtype(self):
        pass


class TestFP16Floor(TestFloor):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCos(OpTest):
    def setUp(self):
        self.op_type = "cos"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Cos(TestCos):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSin(OpTest):
    def setUp(self):
        self.op_type = "sin"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Sin(TestSin):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestRound(OpTest):
    def setUp(self):
        self.op_type = "round"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def init_dtype(self):
        pass


class TestFP16Round(TestRound):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestRelu(OpTest):
    def setUp(self):
        self.op_type = "relu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Relu(TestRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestBRelu(OpTest):
    def setUp(self):
        self.op_type = "brelu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16BRelu(TestBRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestRelu6(OpTest):
    def setUp(self):
        self.op_type = "relu6"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16Relu6(TestRelu6):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftRelu(OpTest):
    def setUp(self):
        self.op_type = "soft_relu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16SoftRelu(TestSoftRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestELU(OpTest):
    def setUp(self):
        self.op_type = "elu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU extensions, the standard ELU function
        # (i.e. alpha = 1) is differentiable at point 0, so we can skip
        # modifications like x[np.abs(x) < 0.005] = 0.02 here.
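        # For alpha = 1 the one-sided derivatives at 0 agree: the right-hand
        # derivative is 1 and the left-hand derivative is alpha * exp(0) = 1.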
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16ELU(TestELU):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestReciprocal(OpTest):
    def setUp(self):
        self.op_type = "reciprocal"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def init_dtype(self):
        pass


class TestFP16Reciprocal(TestReciprocal):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestLog(OpTest):
    def setUp(self):
        self.op_type = "log"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Log(TestLog):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSquare(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Square(TestSquare):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestPow(OpTest):
    def setUp(self):
        self.op_type = "pow"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16Pow(TestPow):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=5e-2)


class TestSTanh(OpTest):
    def setUp(self):
        self.op_type = "stanh"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16STanh(TestSTanh):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftplus(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.dtype = np.float64
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Softplus(TestSoftplus):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftsign(OpTest):
    def setUp(self):
        self.op_type = "softsign"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Softsign(TestSoftsign):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestThresholdedRelu(OpTest):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.dtype = np.float32
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)

    def init_dtype(self):
        pass


class TestFP16ThresholdedRelu(TestThresholdedRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestHardSigmoid(OpTest):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = np.float32
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
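        # With slope = 0.2 and offset = 0.5 the hard sigmoid saturates at
        # lower_threshold = -0.5 / 0.2 = -2.5 and
        # upper_threshold = (1 - 0.5) / 0.2 = 2.5.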

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
1028 1029 1030 1031
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)

    def init_dtype(self):
        pass


class TestFP16HardSigmoid(TestHardSigmoid):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSwish(OpTest):
    def setUp(self):
        self.op_type = "swish"
        self.dtype = np.float32
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)
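        # scipy.special.expit(z) is the logistic sigmoid 1 / (1 + exp(-z)),
        # so this computes swish(x) = x * sigmoid(beta * x).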

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)

    def init_dtype(self):
        pass


class TestFP16Swish(TestSwish):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


if __name__ == "__main__":
    unittest.main()