#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from .layer_function_generator import generate_layer_fn, generate_layer_fn_noattr

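# Activation operators that carry attributes; these are added to __all__
# below and wrapped by generate_layer_fn.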
__activations__ = [
    'softshrink',
]

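# Attribute-free activation operators; wrapped by generate_layer_fn_noattr
# and appended to __all__ further down.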
__activations_noattr__ = [
    'sigmoid',
    'logsigmoid',
    'exp',
    'tanh',
    'tanh_shrink',
    'sqrt',
    'abs',
    'ceil',
    'floor',
    'cos',
    'sin',
    'round',
    'reciprocal',
    'square',
    'softplus',
    'softsign',
]

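# Operator names exposed as layer functions from this module; each entry is
# turned into a Python function by the generation loop below.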
__all__ = [
    'mean',
    'mul',
    'scale',
    'sigmoid_cross_entropy_with_logits',
    'elementwise_add',
    'elementwise_div',
    'elementwise_sub',
    'elementwise_mul',
    'elementwise_max',
    'elementwise_min',
    'elementwise_pow',
    'clip',
    'clip_by_norm',
    'logical_and',
    'logical_or',
    'logical_xor',
    'logical_not',
    'uniform_random_batch_size_like',
    'gaussian_random',
    'sampling_id',
    'gaussian_random_batch_size_like',
    'sum',
    'slice',
    'shape',
    'maxout',
] + __activations__

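# Generate a Python layer function for every operator name in __all__ and
# install it in this module's namespace.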
for _OP in set(__all__):
    globals()[_OP] = generate_layer_fn(_OP)

__all__ += __activations_noattr__

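# The attribute-free activations use the simpler no-attr wrapper.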
for _OP in set(__activations_noattr__):
    globals()[_OP] = generate_layer_fn_noattr(_OP)

__all__ += ["uniform_random"]

_uniform_random_ = generate_layer_fn('uniform_random')


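# uniform_random (and the wrappers below) forward only the arguments that
# were explicitly given, so the generated operator falls back to its own
# default attribute values for anything left as None.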
def uniform_random(shape, dtype=None, min=None, max=None, seed=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _uniform_random_(**kwargs)


uniform_random.__doc__ = _uniform_random_.__doc__ + """
Examples:

    >>> result = fluid.layers.uniform_random(shape=[32, 784])
"""

__all__ += ['hard_shrink']

_hard_shrink_ = generate_layer_fn('hard_shrink')


def hard_shrink(x, threshold=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _hard_shrink_(**kwargs)


hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
Examples:

    >>> data = fluid.layers.data(name="input", shape=[784])
    >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
"""

__all__ += ['cumsum']

_cum_sum_ = generate_layer_fn('cumsum')


def cumsum(x, axis=None, exclusive=None, reverse=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _cum_sum_(**kwargs)


cumsum.__doc__ = _cum_sum_.__doc__ + """
Examples:

    >>> data = fluid.layers.data(name="input", shape=[32, 784])
    >>> result = fluid.layers.cumsum(data, axis=0)
"""

__all__ += ['thresholded_relu']

_thresholded_relu_ = generate_layer_fn('thresholded_relu')


def thresholded_relu(x, threshold=None):
    locals_var = locals().copy()
    kwargs = dict()
    for name, val in locals_var.items():
        if val is not None:
            kwargs[name] = val
    return _thresholded_relu_(**kwargs)


thresholded_relu.__doc__ = _thresholded_relu_.__doc__ + """
Examples:

    >>> data = fluid.layers.data(name="input", shape=[1])
    >>> result = fluid.layers.thresholded_relu(data, threshold=0.4)
"""