# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
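
# Tensor assignment/creation ops re-exported from paddle.fluid.layers.tensor.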
from paddle.fluid.layers.tensor import assign  # noqa: F401
from paddle.fluid.layers.tensor import cast  # noqa: F401
from paddle.fluid.layers.tensor import fill_constant  # noqa: F401
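# Elementwise math, reduction, and shape ops re-exported from paddle.tensor.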
from paddle.tensor import abs  # noqa: F401
from paddle.tensor import acos  # noqa: F401
from paddle.tensor import acosh  # noqa: F401
from paddle.tensor import add  # noqa: F401
from paddle.tensor import asin  # noqa: F401
from paddle.tensor import asinh  # noqa: F401
from paddle.tensor import atan  # noqa: F401
from paddle.tensor import atanh  # noqa: F401
from paddle.tensor import broadcast_shape  # noqa: F401
from paddle.tensor import broadcast_to  # noqa: F401
from paddle.tensor import cos  # noqa: F401
from paddle.tensor import cosh  # noqa: F401
from paddle.tensor import cumprod  # noqa: F401
from paddle.tensor import cumsum  # noqa: F401
from paddle.tensor import digamma  # noqa: F401
from paddle.tensor import divide  # noqa: F401
from paddle.tensor import erf  # noqa: F401
from paddle.tensor import erfinv  # noqa: F401
from paddle.tensor import exp  # noqa: F401
from paddle.tensor import expm1  # noqa: F401
from paddle.tensor import flatten  # noqa: F401
from paddle.tensor import lgamma  # noqa: F401
from paddle.tensor import log  # noqa: F401
from paddle.tensor import log1p  # noqa: F401
from paddle.tensor import logcumsumexp  # noqa: F401
from paddle.tensor import logit  # noqa: F401
from paddle.tensor import logsumexp  # noqa: F401
from paddle.tensor import max  # noqa: F401
from paddle.tensor import mean  # noqa: F401
from paddle.tensor import min  # noqa: F401
from paddle.tensor import multiply  # noqa: F401
from paddle.tensor import ones  # noqa: F401
from paddle.tensor import pow  # noqa: F401
from paddle.tensor import prod  # noqa: F401
from paddle.tensor import reshape  # noqa: F401
from paddle.tensor import sign  # noqa: F401
from paddle.tensor import sin  # noqa: F401
from paddle.tensor import sinh  # noqa: F401
from paddle.tensor import sqrt  # noqa: F401
from paddle.tensor import subtract  # noqa: F401
from paddle.tensor import sum  # noqa: F401
from paddle.tensor import tan  # noqa: F401
from paddle.tensor import tanh  # noqa: F401
from paddle.tensor import zeros  # noqa: F401

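# Basic math primitives: arithmetic, elementwise transforms, and reductions.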
math_op = [
    'add',
    'subtract',
    'multiply',
    'divide',
    'abs',
    'pow',
    'sign',
    'sum',
    'prod',
    'cumsum',
    'cumprod',
    'digamma',
    'lgamma',
    'erf',
    'erfinv',
    'exp',
    'expm1',
    'log',
    'log1p',
    'logsumexp',
    'logcumsumexp',
    'logit',
    'max',
    'min',
]

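# Trigonometric and hyperbolic primitives and their inverses.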
trigonometric_op = [
    'sin',
    'cos',
    'tan',
    'sinh',
    'cosh',
    'tanh',
    'asin',
    'acos',
    'atan',
    'asinh',
    'acosh',
    'atanh',
]

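# Ops grouped as sub-primitives: reduction (mean), creation (ones, zeros), and sqrt.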
sub_prim = [
    'mean',
    'ones',
    'zeros',
    'sqrt',
]

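# Remaining primitives: casting, assignment, and shape-manipulation ops.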
others = [
    'cast',
    'broadcast_to',
    'assign',
    'fill_constant',
    'reshape',
    'flatten',
]

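# The public API is the union of all primitive groups, kept sorted.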
__all__ = []
__all__.extend(math_op)
__all__.extend(trigonometric_op)
__all__.extend(sub_prim)
__all__.extend(others)

__all__.sort()