#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from scipy.special import expit
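# Each test below follows the same OpTest pattern: setUp() builds a random
# input and a NumPy/SciPy reference output for one activation op,
# test_check_output() compares the operator's forward result against that
# reference, and test_check_grad() compares the analytic gradient with a
# numeric gradient within the given max_relative_error.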


class TestExp(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.exp(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSigmoid(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid(OpTest):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(OpTest):
    def setUp(self):
        self.op_type = "tanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(OpTest):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(OpTest):
    def setUp(self):
        self.op_type = "hard_shrink"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        threshold = 0.5

        self.inputs = {'X': x}
        self.attrs = {'lambda': threshold}

        t = np.copy(x)
        t[(t >= -threshold) & (t <= threshold)] = 0
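        # i.e. hard_shrink(x) = x for |x| > lambda, and 0 otherwise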
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(OpTest):
    def setUp(self):
        self.op_type = "softshrink"
        lambda_val = 0.1
        self.attrs = {'lambda': lambda_val}
        self.inputs = {
            'X': np.random.uniform(0.25, 10, [4, 4]).astype("float32")
        }
        y = np.copy(self.inputs['X'])
        y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * (
            y - lambda_val)
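        # i.e. softshrink(x) = x - lambda for x > lambda,
        # x + lambda for x < -lambda, and 0 otherwise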
        self.outputs = {'Out': y}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(OpTest):
    def setUp(self):
        self.op_type = "abs"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        # Because we set delta = 0.005 when calculating the numeric gradient,
        # if x is too small (such as 0.002), x_neg will be -0.003 and
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # We should avoid this.
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(OpTest):
    def setUp(self):
        self.op_type = "ceil"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.ceil(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestFloor(OpTest):
    def setUp(self):
        self.op_type = "floor"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        # numpy floor needs +1
        self.outputs = {'Out': np.floor(self.inputs['X']) + 1.0}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(OpTest):
    def setUp(self):
        self.op_type = "round"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.round(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRelu(OpTest):
    def setUp(self):
        self.op_type = "relu"
        x = np.random.uniform(-1, 1, [11, 17]).astype("float32")
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.maximum(self.inputs['X'], 0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(OpTest):
    def setUp(self):
        self.op_type = "brelu"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02

        self.inputs = {'X': x}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
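        # i.e. brelu(x) = clip(x, t_min, t_max)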
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(OpTest):
    def setUp(self):
        self.op_type = "relu6"
        x = np.random.uniform(-1, 1, [4, 10]).astype("float32")
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
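        # relu6(x) = min(max(x, 0), threshold)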
        self.outputs = {
            'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(OpTest):
    def setUp(self):
        self.op_type = "soft_relu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
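        # i.e. soft_relu(x) = log(1 + exp(clip(x, -threshold, threshold)))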
        self.outputs = {'Out': np.log((np.exp(t) + 1))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(OpTest):
    def setUp(self):
        self.op_type = "elu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        alpha = 1.
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at point 0, so we can skip modifications like
        # x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
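        # elu(x) = x for x > 0, and alpha * (exp(x) - 1) for x <= 0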
        self.outputs = {
            'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(OpTest):
    def setUp(self):
        self.op_type = "reciprocal"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.outputs = {'Out': np.reciprocal(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(OpTest):
    def setUp(self):
        self.op_type = "log"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.square(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(OpTest):
    def setUp(self):
        self.op_type = "pow"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': np.power(self.inputs['X'], 3)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(OpTest):
    def setUp(self):
        self.op_type = "stanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float64")
        }
        self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(OpTest):
    def setUp(self):
        self.op_type = "softsign"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {
            'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(OpTest):
    def setUp(self):
        self.op_type = "thresholded_relu"
        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype("float32")

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2

        self.inputs = {'X': X}
        self.attrs = {'threshold': threshold}
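        # thresholded_relu(x) = x for x > threshold, and 0 otherwise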
        self.outputs = {'Out': (X > threshold) * X}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
F
fengjiayi 已提交
441
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(OpTest):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.inputs = {'X': X}
        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
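        # i.e. hard_sigmoid(x) = clip(slope * x + offset, 0, 1)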
        self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
F
fengjiayi 已提交
469
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(OpTest):
    def setUp(self):
        self.op_type = "swish"
        X = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        self.inputs = {'X': X}
        self.attrs = {'beta': 2.3}
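        # swish(x) = x * sigmoid(beta * x); expit is the logistic sigmoid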
        self.outputs = {'Out': X * expit(self.attrs['beta'] * X)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
F
fengjiayi 已提交
484
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


if __name__ == "__main__":
    unittest.main()