#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import os
from .layer_function_generator import generate_layer_fn, generate_activation_fn
from .. import core
from ..framework import convert_np_dtype_to_dtype_

__activations_noattr__ = [
    'sigmoid',
    'logsigmoid',
    'exp',
    'tanh',
    'atan',
    'tanh_shrink',
    'sqrt',
    'rsqrt',
    'abs',
    'ceil',
    'floor',
    'cos',
    'acos',
    'asin',
    'sin',
    'round',
    'reciprocal',
    'square',
    'softplus',
    'softsign',
]

__all__ = []
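# Note: __all__ is still empty at this point, so the generation loop just
# below registers nothing at import time; the activation names appended
# further down are handled by their own loop.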

for _OP in set(__all__):
    globals()[_OP] = generate_layer_fn(_OP)

# This is a hot fix for some unittests that use:
#   fluid.layers.scale(x=x, scale=10.0, out=out_var)
# e.g. test_program_code.py, test_dist_train.py
globals()['_scale'] = generate_layer_fn('scale')

globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')
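# Neither _scale nor _elementwise_div is appended to __all__, so both remain
# private helpers within fluid.layers rather than part of the public API.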

__all__ += __activations_noattr__

for _OP in set(__activations_noattr__):
    globals()[_OP] = generate_activation_fn(_OP)
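
# A minimal usage sketch (assuming an executor and program are set up
# elsewhere, as in the docstring examples below): every name listed in
# __activations_noattr__ becomes a module-level function that takes a
# single input variable, e.g.
#
#     import paddle.fluid as fluid
#     x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
#     y = fluid.layers.tanh(x)  # generated above by generate_activation_fn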

__all__ += ['softshrink']

_softshrink_ = generate_layer_fn('softshrink')


def softshrink(x, alpha=None):
    locals_var = locals().copy()
    kwargs = dict()
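    # Forward only the arguments that were actually supplied, so that the
    # operator's registered defaults apply to the rest; hard_shrink, cumsum
    # and thresholded_relu below use the same pattern. 'alpha' is mapped to
    # the op attribute named 'lambda', which is a reserved word in Python
    # and therefore cannot be used as a keyword argument directly.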
    for name, val in locals_var.items():
        if val is not None:
            if name == 'alpha':
                kwargs['lambda'] = val
            else:
                kwargs[name] = val
    return _softshrink_(**kwargs)


softshrink.__doc__ = r"""
:strong:`Softshrink Activation Operator`

..  math::
    out = \begin{cases}
            x - \alpha, \text{if } x > \alpha \\
            x + \alpha, \text{if } x < -\alpha \\
            0,  \text{otherwise}
            \end{cases}


Args:
    x: Input of the Softshrink operator.
    alpha (float): non-negative offset.

Returns:
    Output of the Softshrink operator.

Examples:
    .. code-block:: python
    
        import paddle.fluid as fluid
        data = fluid.layers.data(name="input", shape=[784])
        result = fluid.layers.softshrink(x=data, alpha=0.3)
"""

__all__ += ['hard_shrink']

_hard_shrink_ = generate_layer_fn('hard_shrink')


def hard_shrink(x, threshold=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _hard_shrink_(**kwargs)


hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
Examples:

    >>> import paddle.fluid as fluid
    >>> data = fluid.layers.data(name="input", shape=[784])
    >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
"""

__all__ += ['cumsum']

_cum_sum_ = generate_layer_fn('cumsum')


def cumsum(x, axis=None, exclusive=None, reverse=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _cum_sum_(**kwargs)


cumsum.__doc__ = _cum_sum_.__doc__ + """
Examples:

    >>> import paddle.fluid as fluid
    >>> data = fluid.layers.data(name="input", shape=[32, 784])
    >>> result = fluid.layers.cumsum(data, axis=0)
"""

__all__ += ['thresholded_relu']

_thresholded_relu_ = generate_layer_fn('thresholded_relu')


def thresholded_relu(x, threshold=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _thresholded_relu_(**kwargs)


thresholded_relu.__doc__ = """
:strong:`Thresholded ReLU Activation Operator`

Equation:
    ..  math::
        out = \\begin{cases}
            x, &\\text{if } x > threshold \\\\
            0, &\\text{otherwise}
            \\end{cases}

Args:
    x (Variable): The input of the Thresholded ReLU op, a Tensor or LoDTensor with dtype float32 or float64.
    threshold (float, optional): The threshold value. If `threshold` is not set, the equation uses 1.0.

Returns:
    Variable: The output of the Thresholded ReLU op, a Tensor or LoDTensor with the same dtype and shape as the input.

Examples:
    
    .. code-block:: python
    
        # declarative mode
        import numpy as np
        from paddle import fluid
        
        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
        y = fluid.layers.thresholded_relu(x, threshold=0.1)
        
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        start = fluid.default_startup_program()
        main = fluid.default_main_program()
        
        data = np.random.randn(2, 3).astype("float32")
        exe.run(start)
        
        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
        
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)

    .. code-block:: python
    
        # imperative mode
        import numpy as np
        from paddle import fluid
        import paddle.fluid.dygraph as dg
        
        data = np.random.randn(2, 3).astype("float32")
        place = fluid.CPUPlace()
        with dg.guard(place) as g:
            x = dg.to_variable(data)
            y = fluid.layers.thresholded_relu(x, threshold=0.1)
            y_np = y.numpy()
        data
        # array([[ 0.21134382, -1.1805999 ,  0.32876605],
        #        [-1.2210793 , -0.7365624 ,  1.0013918 ]], dtype=float32)
        y_np
        # array([[ 0.21134382, -0.        ,  0.32876605],
        #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)
"""