#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit
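# Each test below follows the same OpTest pattern: setUp() builds numpy inputs
# together with a numpy reference output, check_output() compares the op's
# forward result against that reference, and check_grad() compares the analytic
# gradient with a numeric (finite-difference) estimate within max_relative_error.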


class TestExp(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.exp(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSigmoid(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid(OpTest):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(OpTest):
    def setUp(self):
        self.op_type = "tanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(OpTest):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(OpTest):
    def setUp(self):
        self.op_type = "hard_shrink"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        threshold = 0.5

        self.inputs = {'X': x}
        self.attrs = {'lambda': threshold}

        t = np.copy(x)
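        # Reference: hard_shrink(x) = x when |x| > lambda, and 0 otherwise.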
        t[(t >= -threshold) & (t <= threshold)] = 0
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(OpTest):
    def setUp(self):
        self.op_type = "softshrink"
        lambda_val = 0.1
        self.attrs = {'lambda': lambda_val}
        self.inputs = {
            'X': np.random.uniform(0.25, 10, [4, 4]).astype("float32")
        }
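        # Reference: softshrink(x) = x - lambda for x > lambda,
        # x + lambda for x < -lambda, and 0 otherwise.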
        y = np.copy(self.inputs['X'])
        y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * (
            y - lambda_val)
        self.outputs = {'Out': y}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(OpTest):
    def setUp(self):
        self.op_type = "abs"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        # Because we set delta = 0.005 when calculating the numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003 and
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # We should avoid such values.
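        # (Otherwise the finite-difference samples x - delta and x + delta straddle
        # the kink of |x| at 0, and the two-sided estimate no longer matches the
        # analytic gradient.)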
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(OpTest):
    def setUp(self):
        self.op_type = "ceil"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.ceil(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestFloor(OpTest):
    def setUp(self):
        self.op_type = "floor"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.floor(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCos(OpTest):
    def setUp(self):
        self.op_type = "cos"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.cos(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(OpTest):
    def setUp(self):
        self.op_type = "round"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.round(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRelu(OpTest):
    def setUp(self):
        self.op_type = "relu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Relu(TestRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestBRelu(OpTest):
    def setUp(self):
        self.op_type = "brelu"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02

        self.inputs = {'X': x}
        self.attrs = {'t_min': t_min, 't_max': t_max}
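        # Reference: brelu clips x element-wise into the range [t_min, t_max].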
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(OpTest):
    def setUp(self):
        self.op_type = "relu6"
        x = np.random.uniform(-1, 1, [4, 10]).astype("float32")
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
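        # Reference: relu6(x) = min(max(x, 0), threshold), with threshold = 6.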
        self.outputs = {
            'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(OpTest):
    def setUp(self):
        self.op_type = "soft_relu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
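        # Reference: soft_relu(x) = log(1 + exp(clip(x, -threshold, threshold))).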
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        self.outputs = {'Out': np.log((np.exp(t) + 1))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(OpTest):
    def setUp(self):
        self.op_type = "elu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        alpha = 1.
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so we can skip adjustments like x[np.abs(x) < 0.005] = 0.02 here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {
            'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(OpTest):
    def setUp(self):
        self.op_type = "reciprocal"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.outputs = {'Out': np.reciprocal(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(OpTest):
    def setUp(self):
        self.op_type = "log"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.square(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(OpTest):
    def setUp(self):
        self.op_type = "pow"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': np.power(self.inputs['X'], 3)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(OpTest):
    def setUp(self):
        self.op_type = "stanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
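        # Reference: stanh(x) = scale_b * tanh(scale_a * x),
        # i.e. the classic 1.7159 * tanh(2x / 3) scaled tanh.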
        self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float64")
        }
        self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(OpTest):
    def setUp(self):
        self.op_type = "softsign"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {
            'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(OpTest):
    def setUp(self):
        self.op_type = "thresholded_relu"
        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype("float32")

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2

        self.inputs = {'X': X}
        self.attrs = {'threshold': threshold}
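        # Reference: thresholded_relu(x) = x when x > threshold, and 0 otherwise.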
        self.outputs = {'Out': (X > threshold) * X}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(OpTest):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
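        # hard_sigmoid(x) = clip(slope * x + offset, 0, 1); the output saturates
        # at 0 below lower_threshold and at 1 above upper_threshold.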

        self.inputs = {'X': X}
        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(OpTest):
    def setUp(self):
        self.op_type = "swish"
        X = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        self.inputs = {'X': X}
        self.attrs = {'beta': 2.3}
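        # Reference: swish(x) = x * sigmoid(beta * x); scipy's expit is the
        # logistic sigmoid.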
        self.outputs = {'Out': X * expit(self.attrs['beta'] * X)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#--------------------test MKLDNN--------------------
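# The MKLDNN variants below rerun the base tests on 4-D inputs with the
# "use_mkldnn" attribute set, so the MKLDNN kernels are checked against the
# same numpy reference outputs.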
class TestMKLDNNRelu(TestRelu):
    def setUp(self):
        super(TestMKLDNNRelu, self).setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNTanh(TestTanh):
    def setUp(self):
        super(TestMKLDNNTanh, self).setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSqrt(TestSqrt):
    def setUp(self):
        super(TestMKLDNNSqrt, self).setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNAbs(TestAbs):
    def setUp(self):
        super(TestMKLDNNAbs, self).setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


if __name__ == "__main__":
    unittest.main()