# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
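
# This module re-exports a curated set of Paddle tensor operators and groups
# their names by category; the sorted union of those groups forms __all__.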
from paddle.fluid.layers.tensor import assign  # noqa: F401
from paddle.fluid.layers.tensor import cast  # noqa: F401
from paddle.fluid.layers.tensor import fill_constant  # noqa: F401
from paddle.tensor import abs  # noqa: F401
from paddle.tensor import acos  # noqa: F401
from paddle.tensor import acosh  # noqa: F401
from paddle.tensor import add  # noqa: F401
from paddle.tensor import asin  # noqa: F401
from paddle.tensor import asinh  # noqa: F401
from paddle.tensor import atan  # noqa: F401
from paddle.tensor import atanh  # noqa: F401
from paddle.tensor import broadcast_shape  # noqa: F401
from paddle.tensor import broadcast_to  # noqa: F401
from paddle.tensor import cos  # noqa: F401
from paddle.tensor import cosh  # noqa: F401
from paddle.tensor import cumprod  # noqa: F401
from paddle.tensor import cumsum  # noqa: F401
from paddle.tensor import digamma  # noqa: F401
from paddle.tensor import divide  # noqa: F401
from paddle.tensor import erf  # noqa: F401
from paddle.tensor import erfinv  # noqa: F401
from paddle.tensor import exp  # noqa: F401
from paddle.tensor import expm1  # noqa: F401
from paddle.tensor import lgamma  # noqa: F401
from paddle.tensor import log  # noqa: F401
from paddle.tensor import log1p  # noqa: F401
from paddle.tensor import logcumsumexp  # noqa: F401
from paddle.tensor import logit  # noqa: F401
from paddle.tensor import logsumexp  # noqa: F401
from paddle.tensor import max  # noqa: F401
from paddle.tensor import mean  # noqa: F401
from paddle.tensor import min  # noqa: F401
from paddle.tensor import multiply  # noqa: F401
from paddle.tensor import ones  # noqa: F401
from paddle.tensor import pow  # noqa: F401
from paddle.tensor import prod  # noqa: F401
from paddle.tensor import reshape  # noqa: F401
from paddle.tensor import sign  # noqa: F401
from paddle.tensor import sin  # noqa: F401
from paddle.tensor import sinh  # noqa: F401
from paddle.tensor import sqrt  # noqa: F401
from paddle.tensor import subtract  # noqa: F401
from paddle.tensor import sum  # noqa: F401
from paddle.tensor import tan  # noqa: F401
from paddle.tensor import tanh  # noqa: F401
from paddle.tensor import zeros  # noqa: F401

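# Elementwise arithmetic, reductions/scans, and special math functions.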
math_op = [
    'add',
    'subtract',
    'multiply',
    'divide',
    'abs',
    'pow',
    'sign',
    'sum',
    'prod',
    'cumsum',
    'cumprod',
    'digamma',
    'lgamma',
    'erf',
    'erfinv',
    'exp',
    'expm1',
    'log',
    'log1p',
    'logsumexp',
    'logcumsumexp',
    'logit',
    'max',
    'min',
]

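# Trigonometric and hyperbolic functions and their inverses.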
trigonometric_op = [
    'sin',
    'cos',
    'tan',
    'sinh',
    'cosh',
    'tanh',
    'asin',
    'acos',
    'atan',
    'asinh',
    'acosh',
    'atanh',
]

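# Tensor creation ops plus mean and sqrt, kept in a separate sub_prim group.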
sub_prim = [
    'mean',
    'ones',
    'zeros',
    'sqrt',
]

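# Data movement and shape/creation utilities.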
others = [
    'cast',
    'broadcast_to',
    'assign',
    'fill_constant',
    'reshape',
]

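# Public API: the sorted union of all operator groups defined above.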
__all__ = []
__all__.extend(math_op)
__all__.extend(trigonometric_op)
__all__.extend(sub_prim)
__all__.extend(others)

__all__.sort()
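
# A minimal usage sketch. The import path below assumes this file lives at
# paddle/incubate/autograd/primitives.py; adjust it to the module's actual
# location:
#
#     import paddle
#     from paddle.incubate.autograd import primitives
#
#     x = paddle.to_tensor([1.0, 2.0, 3.0])
#     y = primitives.exp(x)  # same callable as paddle.tensor.exp, re-exported here
#     assert primitives.__all__ == sorted(primitives.__all__)  # __all__ stays sorted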