# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The `paddle.version` / `paddle.cuda_env` modules are generated at wheel
# build time; importing from a source checkout has no such modules, so the
# ImportError path warns the user to install an official wheel instead.
try:
    from paddle.version import full_version as __version__, commit as __git_commit__
    from paddle.cuda_env import *  # noqa: F403
except ImportError:
    import sys

    sys.stderr.write('''Warning with import paddle: you should not
     import paddle from the source directory; please install paddlepaddle*.whl firstly.''')
25

26
from .batch import batch  # noqa: F401
27 28 29 30

# NOTE: these tensor monkey-patches are intentionally *duplicated* with the
# ones applied elsewhere; this copy should be removed once the illogical
# implementation inside the monkey-patch methods is fixed.
from .framework import (
    monkey_patch_math_tensor,
    monkey_patch_variable,
)

# Patch Variable first, then the math ops — keep this call order.
monkey_patch_variable()
monkey_patch_math_tensor()

from .framework import disable_signal_handler  # noqa: F401
from .framework import get_flags  # noqa: F401
from .framework import set_flags  # noqa: F401

from .framework import disable_static  # noqa: F401
from .framework import enable_static  # noqa: F401
from .framework import in_dynamic_mode  # noqa: F401
44
from .fluid.dataset import *  # noqa: F401, F403
Z
zhiboniu 已提交
45

46
from .framework.dtype import iinfo  # noqa: F401
47
from .framework.dtype import finfo  # noqa: F401
48
from .framework.dtype import dtype  # noqa: F401
Z
zhiboniu 已提交
49 50 51 52 53 54 55 56 57 58 59 60
from .framework.dtype import uint8  # noqa: F401
from .framework.dtype import int8  # noqa: F401
from .framework.dtype import int16  # noqa: F401
from .framework.dtype import int32  # noqa: F401
from .framework.dtype import int64  # noqa: F401
from .framework.dtype import float16  # noqa: F401
from .framework.dtype import float32  # noqa: F401
from .framework.dtype import float64  # noqa: F401
from .framework.dtype import bfloat16  # noqa: F401
from .framework.dtype import bool  # noqa: F401
from .framework.dtype import complex64  # noqa: F401
from .framework.dtype import complex128  # noqa: F401
61

# Expose the eager-mode core tensor type as the public `paddle.Tensor`.
# NOTE(review): relies on the name `framework` being bound by earlier
# submodule imports in this file — confirm if imports are reordered.
Tensor = framework.core.eager.Tensor  # noqa: F401
# Make repr/qualname read as the public name rather than the core path.
Tensor.__qualname__ = 'Tensor'  # noqa: F401
import paddle.distributed  # noqa: F401
import paddle.sysconfig  # noqa: F401
import paddle.distribution  # noqa: F401
import paddle.nn  # noqa: F401
import paddle.distributed.fleet  # noqa: F401
import paddle.optimizer  # noqa: F401
import paddle.metric  # noqa: F401
import paddle.regularizer  # noqa: F401
import paddle.incubate  # noqa: F401
import paddle.autograd  # noqa: F401
74
import paddle.device  # noqa: F401
75
import paddle.decomposition  # noqa: F401
76

77 78 79 80 81 82 83 84 85
import paddle.jit  # noqa: F401
import paddle.amp  # noqa: F401
import paddle.dataset  # noqa: F401
import paddle.inference  # noqa: F401
import paddle.io  # noqa: F401
import paddle.onnx  # noqa: F401
import paddle.reader  # noqa: F401
import paddle.static  # noqa: F401
import paddle.vision  # noqa: F401
Y
YangZhou 已提交
86
import paddle.audio  # noqa: F401
87
import paddle.geometric  # noqa: F401
88
import paddle.sparse  # noqa: F401
89
import paddle.quantization  # noqa: F401
90

91 92
from .tensor.attribute import is_complex  # noqa: F401
from .tensor.attribute import is_integer  # noqa: F401
93 94 95 96
from .tensor.attribute import rank  # noqa: F401
from .tensor.attribute import shape  # noqa: F401
from .tensor.attribute import real  # noqa: F401
from .tensor.attribute import imag  # noqa: F401
W
wuhuanzhou 已提交
97
from .tensor.attribute import is_floating_point  # noqa: F401
98
from .tensor.creation import create_parameter  # noqa: F401
99 100
from .tensor.creation import to_tensor  # noqa: F401
from .tensor.creation import diag  # noqa: F401
L
Li Min 已提交
101
from .tensor.creation import diagflat  # noqa: F401
102 103
from .tensor.creation import eye  # noqa: F401
from .tensor.creation import linspace  # noqa: F401
104
from .tensor.creation import logspace  # noqa: F401
105 106 107 108 109 110 111 112
from .tensor.creation import ones  # noqa: F401
from .tensor.creation import ones_like  # noqa: F401
from .tensor.creation import zeros  # noqa: F401
from .tensor.creation import zeros_like  # noqa: F401
from .tensor.creation import arange  # noqa: F401
from .tensor.creation import full  # noqa: F401
from .tensor.creation import full_like  # noqa: F401
from .tensor.creation import triu  # noqa: F401
113
from .tensor.creation import triu_  # noqa: F401
114
from .tensor.creation import tril  # noqa: F401
115
from .tensor.creation import tril_  # noqa: F401
116 117 118 119
from .tensor.creation import meshgrid  # noqa: F401
from .tensor.creation import empty  # noqa: F401
from .tensor.creation import empty_like  # noqa: F401
from .tensor.creation import assign  # noqa: F401
F
Feiyu Chan 已提交
120
from .tensor.creation import complex  # noqa: F401
121
from .tensor.creation import clone  # noqa: F401
122 123
from .tensor.creation import tril_indices  # noqa: F401
from .tensor.creation import triu_indices  # noqa: F401
124
from .tensor.creation import polar  # noqa: F401
125 126 127 128 129 130
from .tensor.linalg import matmul  # noqa: F401
from .tensor.linalg import dot  # noqa: F401
from .tensor.linalg import norm  # noqa: F401
from .tensor.linalg import transpose  # noqa: F401
from .tensor.linalg import dist  # noqa: F401
from .tensor.linalg import t  # noqa: F401
131
from .tensor.linalg import cdist  # noqa: F401
132 133 134 135
from .tensor.linalg import cross  # noqa: F401
from .tensor.linalg import cholesky  # noqa: F401
from .tensor.linalg import bmm  # noqa: F401
from .tensor.linalg import histogram  # noqa: F401
S
smallv0221 已提交
136
from .tensor.linalg import bincount  # noqa: F401
137 138
from .tensor.linalg import mv  # noqa: F401
from .tensor.logic import equal  # noqa: F401
139
from .tensor.logic import equal_  # noqa: F401
140
from .tensor.linalg import eigvalsh  # noqa: F401
141
from .tensor.logic import greater_equal  # noqa: F401
142
from .tensor.logic import greater_equal_  # noqa: F401
143
from .tensor.logic import greater_than  # noqa: F401
144
from .tensor.logic import greater_than_  # noqa: F401
145 146
from .tensor.logic import is_empty  # noqa: F401
from .tensor.logic import less_equal  # noqa: F401
147
from .tensor.logic import less_equal_  # noqa: F401
148
from .tensor.logic import less_than  # noqa: F401
149
from .tensor.logic import less_than_  # noqa: F401
150
from .tensor.logic import logical_and  # noqa: F401
151
from .tensor.logic import logical_and_  # noqa: F401
152
from .tensor.logic import logical_not  # noqa: F401
153
from .tensor.logic import logical_not_  # noqa: F401
154
from .tensor.logic import logical_or  # noqa: F401
155
from .tensor.logic import logical_or_  # noqa: F401
156
from .tensor.logic import logical_xor  # noqa: F401
157
from .tensor.logic import logical_xor_  # noqa: F401
158
from .tensor.logic import bitwise_and  # noqa: F401
159
from .tensor.logic import bitwise_and_  # noqa: F401
160
from .tensor.logic import bitwise_not  # noqa: F401
161
from .tensor.logic import bitwise_not_  # noqa: F401
162
from .tensor.logic import bitwise_or  # noqa: F401
163
from .tensor.logic import bitwise_or_  # noqa: F401
164
from .tensor.logic import bitwise_xor  # noqa: F401
165
from .tensor.logic import bitwise_xor_  # noqa: F401
166
from .tensor.logic import not_equal  # noqa: F401
167
from .tensor.logic import not_equal_  # noqa: F401
168
from .tensor.logic import allclose  # noqa: F401
A
andyjpaddle 已提交
169
from .tensor.logic import isclose  # noqa: F401
170 171 172
from .tensor.logic import equal_all  # noqa: F401
from .tensor.logic import is_tensor  # noqa: F401
from .tensor.manipulation import cast  # noqa: F401
173
from .tensor.manipulation import cast_  # noqa: F401
174
from .tensor.manipulation import concat  # noqa: F401
175
from .tensor.manipulation import broadcast_tensors  # noqa: F401
176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191
from .tensor.manipulation import expand  # noqa: F401
from .tensor.manipulation import broadcast_to  # noqa: F401
from .tensor.manipulation import expand_as  # noqa: F401
from .tensor.manipulation import tile  # noqa: F401
from .tensor.manipulation import flatten  # noqa: F401
from .tensor.manipulation import gather  # noqa: F401
from .tensor.manipulation import gather_nd  # noqa: F401
from .tensor.manipulation import reshape  # noqa: F401
from .tensor.manipulation import reshape_  # noqa: F401
from .tensor.manipulation import flip as reverse  # noqa: F401
from .tensor.manipulation import scatter  # noqa: F401
from .tensor.manipulation import scatter_  # noqa: F401
from .tensor.manipulation import scatter_nd_add  # noqa: F401
from .tensor.manipulation import scatter_nd  # noqa: F401
from .tensor.manipulation import shard_index  # noqa: F401
from .tensor.manipulation import slice  # noqa: F401
Z
zhiboniu 已提交
192
from .tensor.manipulation import crop  # noqa: F401
193
from .tensor.manipulation import split  # noqa: F401
194
from .tensor.manipulation import vsplit  # noqa: F401
195 196 197 198 199
from .tensor.manipulation import squeeze  # noqa: F401
from .tensor.manipulation import squeeze_  # noqa: F401
from .tensor.manipulation import stack  # noqa: F401
from .tensor.manipulation import strided_slice  # noqa: F401
from .tensor.manipulation import unique  # noqa: F401
D
duanboqiang 已提交
200
from .tensor.manipulation import unique_consecutive  # noqa: F401
201 202 203 204
from .tensor.manipulation import unsqueeze  # noqa: F401
from .tensor.manipulation import unsqueeze_  # noqa: F401
from .tensor.manipulation import unstack  # noqa: F401
from .tensor.manipulation import flip  # noqa: F401
Z
zmxdream 已提交
205
from .tensor.manipulation import rot90  # noqa: F401
206 207 208 209
from .tensor.manipulation import unbind  # noqa: F401
from .tensor.manipulation import roll  # noqa: F401
from .tensor.manipulation import chunk  # noqa: F401
from .tensor.manipulation import tolist  # noqa: F401
210
from .tensor.manipulation import take_along_axis  # noqa: F401
211
from .tensor.manipulation import put_along_axis  # noqa: F401
F
From00 已提交
212
from .tensor.manipulation import tensordot  # noqa: F401
213 214
from .tensor.manipulation import as_complex  # noqa: F401
from .tensor.manipulation import as_real  # noqa: F401
215
from .tensor.manipulation import moveaxis  # noqa: F401
K
kuizhiqing 已提交
216
from .tensor.manipulation import repeat_interleave  # noqa: F401
L
Li Min 已提交
217 218
from .tensor.manipulation import index_add  # noqa: F401
from .tensor.manipulation import index_add_  # noqa: F401
傅剑寒 已提交
219 220
from .tensor.manipulation import index_put  # noqa: F401
from .tensor.manipulation import index_put_  # noqa: F401
221
from .tensor.manipulation import unflatten  # noqa: F401
W
wanghuancoder 已提交
222 223 224 225
from .tensor.manipulation import as_strided  # noqa: F401
from .tensor.manipulation import view  # noqa: F401
from .tensor.manipulation import view_as  # noqa: F401
from .tensor.manipulation import unfold  # noqa: F401
226
from .tensor.math import abs  # noqa: F401
G
GGBond8488 已提交
227
from .tensor.math import abs_  # noqa: F401
228
from .tensor.math import acos  # noqa: F401
G
GGBond8488 已提交
229
from .tensor.math import acos_  # noqa: F401
230
from .tensor.math import asin  # noqa: F401
G
GGBond8488 已提交
231
from .tensor.math import asin_  # noqa: F401
232
from .tensor.math import atan  # noqa: F401
G
GGBond8488 已提交
233
from .tensor.math import atan_  # noqa: F401
R
ronnywang 已提交
234
from .tensor.math import atan2  # noqa: F401
235 236
from .tensor.math import ceil  # noqa: F401
from .tensor.math import cos  # noqa: F401
G
GGBond8488 已提交
237
from .tensor.math import cos_  # noqa: F401
238
from .tensor.math import tan  # noqa: F401
G
GGBond8488 已提交
239
from .tensor.math import tan_  # noqa: F401
240
from .tensor.math import cosh  # noqa: F401
G
GGBond8488 已提交
241
from .tensor.math import cosh_  # noqa: F401
242
from .tensor.math import cumsum  # noqa: F401
243
from .tensor.math import cumsum_  # noqa: F401
244 245
from .tensor.math import cummax  # noqa: F401
from .tensor.math import cummin  # noqa: F401
H
hlygit66666 已提交
246
from .tensor.math import cumprod  # noqa: F401
247
from .tensor.math import cumprod_  # noqa: F401
248
from .tensor.math import logcumsumexp  # noqa: F401
W
wangzhen38 已提交
249
from .tensor.math import logit  # noqa: F401
250
from .tensor.math import logit_  # noqa: F401
251
from .tensor.math import exp  # noqa: F401
R
ronnywang 已提交
252
from .tensor.math import expm1  # noqa: F401
G
GGBond8488 已提交
253
from .tensor.math import expm1_  # noqa: F401
254 255 256
from .tensor.math import floor  # noqa: F401
from .tensor.math import increment  # noqa: F401
from .tensor.math import log  # noqa: F401
257 258
from .tensor.math import log_  # noqa: F401
from .tensor.math import log2_  # noqa: F401
259 260
from .tensor.math import log2  # noqa: F401
from .tensor.math import log10  # noqa: F401
261
from .tensor.math import log10_  # noqa: F401
262 263
from .tensor.math import multiplex  # noqa: F401
from .tensor.math import pow  # noqa: F401
264
from .tensor.math import pow_  # noqa: F401
265 266 267 268 269 270 271 272
from .tensor.math import reciprocal  # noqa: F401
from .tensor.math import all  # noqa: F401
from .tensor.math import any  # noqa: F401
from .tensor.math import round  # noqa: F401
from .tensor.math import rsqrt  # noqa: F401
from .tensor.math import scale  # noqa: F401
from .tensor.math import sign  # noqa: F401
from .tensor.math import sin  # noqa: F401
G
GGBond8488 已提交
273
from .tensor.math import sin_  # noqa: F401
274
from .tensor.math import sinh  # noqa: F401
G
GGBond8488 已提交
275
from .tensor.math import sinh_  # noqa: F401
276 277
from .tensor.math import sqrt  # noqa: F401
from .tensor.math import square  # noqa: F401
G
GGBond8488 已提交
278
from .tensor.math import square_  # noqa: F401
279 280
from .tensor.math import stanh  # noqa: F401
from .tensor.math import sum  # noqa: F401
281
from .tensor.math import nan_to_num  # noqa: F401
282
from .tensor.math import nan_to_num_  # noqa: F401
W
wangguanqun 已提交
283
from .tensor.math import nansum  # noqa: F401
284
from .tensor.math import nanmean  # noqa: F401
285
from .tensor.math import count_nonzero  # noqa: F401
286 287 288 289 290
from .tensor.math import tanh  # noqa: F401
from .tensor.math import tanh_  # noqa: F401
from .tensor.math import add_n  # noqa: F401
from .tensor.math import max  # noqa: F401
from .tensor.math import maximum  # noqa: F401
T
Tao Luo 已提交
291
from .tensor.math import amax  # noqa: F401
292 293
from .tensor.math import min  # noqa: F401
from .tensor.math import minimum  # noqa: F401
T
Tao Luo 已提交
294
from .tensor.math import amin  # noqa: F401
295 296
from .tensor.math import mm  # noqa: F401
from .tensor.math import divide  # noqa: F401
297
from .tensor.math import divide_  # noqa: F401
298
from .tensor.math import floor_divide  # noqa: F401
299
from .tensor.math import floor_divide_  # noqa: F401
300
from .tensor.math import remainder  # noqa: F401
301
from .tensor.math import remainder_  # noqa: F401
302
from .tensor.math import mod  # noqa: F401
303
from .tensor.math import mod_  # noqa: F401
304
from .tensor.math import floor_mod  # noqa: F401
305
from .tensor.math import floor_mod_  # noqa: F401
306
from .tensor.math import multiply  # noqa: F401
307
from .tensor.math import multiply_  # noqa: F401
S
seemingwang 已提交
308
from .tensor.math import renorm  # noqa: F401
309
from .tensor.math import renorm_  # noqa: F401
310 311 312
from .tensor.math import add  # noqa: F401
from .tensor.math import subtract  # noqa: F401
from .tensor.math import logsumexp  # noqa: F401
Z
zhiboniu 已提交
313
from .tensor.math import logaddexp  # noqa: F401
314 315
from .tensor.math import inverse  # noqa: F401
from .tensor.math import log1p  # noqa: F401
316
from .tensor.math import log1p_  # noqa: F401
317
from .tensor.math import erf  # noqa: F401
G
GGBond8488 已提交
318
from .tensor.math import erf_  # noqa: F401
319
from .tensor.math import addmm  # noqa: F401
G
GGBond8488 已提交
320
from .tensor.math import addmm_  # noqa: F401
321 322
from .tensor.math import clip  # noqa: F401
from .tensor.math import trace  # noqa: F401
323
from .tensor.math import diagonal  # noqa: F401
324 325 326 327 328 329 330
from .tensor.math import kron  # noqa: F401
from .tensor.math import isfinite  # noqa: F401
from .tensor.math import isinf  # noqa: F401
from .tensor.math import isnan  # noqa: F401
from .tensor.math import prod  # noqa: F401
from .tensor.math import broadcast_shape  # noqa: F401
from .tensor.math import conj  # noqa: F401
331
from .tensor.math import trunc  # noqa: F401
332
from .tensor.math import trunc_  # noqa: F401
Z
zyfncg 已提交
333
from .tensor.math import digamma  # noqa: F401
334
from .tensor.math import digamma_  # noqa: F401
335
from .tensor.math import neg  # noqa: F401
336
from .tensor.math import neg_  # noqa: F401
337
from .tensor.math import lgamma  # noqa: F401
338
from .tensor.math import lgamma_  # noqa: F401
X
xiaoting 已提交
339
from .tensor.math import acosh  # noqa: F401
G
GGBond8488 已提交
340
from .tensor.math import acosh_  # noqa: F401
X
xiaoting 已提交
341
from .tensor.math import asinh  # noqa: F401
G
GGBond8488 已提交
342
from .tensor.math import asinh_  # noqa: F401
X
xiaoting 已提交
343
from .tensor.math import atanh  # noqa: F401
G
GGBond8488 已提交
344
from .tensor.math import atanh_  # noqa: F401
345
from .tensor.math import lerp  # noqa: F401
W
wuhuanzhou 已提交
346
from .tensor.math import erfinv  # noqa: F401
347 348
from .tensor.math import rad2deg  # noqa: F401
from .tensor.math import deg2rad  # noqa: F401
T
Tao Luo 已提交
349
from .tensor.math import gcd  # noqa: F401
350
from .tensor.math import gcd_  # noqa: F401
T
Tao Luo 已提交
351
from .tensor.math import lcm  # noqa: F401
352
from .tensor.math import lcm_  # noqa: F401
A
andyjpaddle 已提交
353
from .tensor.math import diff  # noqa: F401
F
Feiyu Chan 已提交
354
from .tensor.math import angle  # noqa: F401
L
LJQ❤️ 已提交
355 356
from .tensor.math import fmax  # noqa: F401
from .tensor.math import fmin  # noqa: F401
Z
zhiboniu 已提交
357 358
from .tensor.math import inner  # noqa: F401
from .tensor.math import outer  # noqa: F401
359
from .tensor.math import heaviside  # noqa: F401
360
from .tensor.math import frac  # noqa: F401
361
from .tensor.math import frac_  # noqa: F401
362
from .tensor.math import sgn  # noqa: F401
363
from .tensor.math import take  # noqa: F401
364
from .tensor.math import frexp  # noqa: F401
365
from .tensor.math import ldexp  # noqa: F401
366
from .tensor.math import ldexp_  # noqa: F401
367 368
from .tensor.math import trapezoid  # noqa: F401
from .tensor.math import cumulative_trapezoid  # noqa: F401
369
from .tensor.math import vander  # noqa: F401
370
from .tensor.math import nextafter  # noqa: F401
371
from .tensor.math import i0  # noqa: F401
372
from .tensor.math import i0_  # noqa: F401
373
from .tensor.math import i0e  # noqa: F401
374 375
from .tensor.math import i1  # noqa: F401
from .tensor.math import i1e  # noqa: F401
376
from .tensor.math import polygamma  # noqa: F401
377
from .tensor.math import polygamma_  # noqa: F401
L
Leo Chen 已提交
378

379 380
from .tensor.random import bernoulli  # noqa: F401
from .tensor.random import poisson  # noqa: F401
381 382 383 384 385 386 387
from .tensor.random import multinomial  # noqa: F401
from .tensor.random import standard_normal  # noqa: F401
from .tensor.random import normal  # noqa: F401
from .tensor.random import uniform  # noqa: F401
from .tensor.random import randn  # noqa: F401
from .tensor.random import rand  # noqa: F401
from .tensor.random import randint  # noqa: F401
388
from .tensor.random import randint_like  # noqa: F401
389 390 391 392
from .tensor.random import randperm  # noqa: F401
from .tensor.search import argmax  # noqa: F401
from .tensor.search import argmin  # noqa: F401
from .tensor.search import argsort  # noqa: F401
Y
Yanxing Shi 已提交
393
from .tensor.search import searchsorted  # noqa: F401
394
from .tensor.search import bucketize  # noqa: F401
395 396 397
from .tensor.search import masked_select  # noqa: F401
from .tensor.search import topk  # noqa: F401
from .tensor.search import where  # noqa: F401
398
from .tensor.search import where_  # noqa: F401
399 400 401
from .tensor.search import index_select  # noqa: F401
from .tensor.search import nonzero  # noqa: F401
from .tensor.search import sort  # noqa: F401
402
from .tensor.search import kthvalue  # noqa: F401
403
from .tensor.search import mode  # noqa: F401
404

405
from .tensor.to_string import set_printoptions  # noqa: F401
406

T
Tongxin Bai 已提交
407 408
from .tensor.einsum import einsum  # noqa: F401

409 410 411
from .framework.random import seed  # noqa: F401
from .framework.random import get_cuda_rng_state  # noqa: F401
from .framework.random import set_cuda_rng_state  # noqa: F401
Q
QingshuChen 已提交
412 413
from .framework.random import get_rng_state  # noqa: F401
from .framework.random import set_rng_state  # noqa: F401
414 415
from .framework import ParamAttr  # noqa: F401
from .framework import CPUPlace  # noqa: F401
J
jianghaicheng 已提交
416
from .framework import IPUPlace  # noqa: F401
417 418
from .framework import CUDAPlace  # noqa: F401
from .framework import CUDAPinnedPlace  # noqa: F401
419
from .framework import CustomPlace  # noqa: F401
420
from .framework import XPUPlace  # noqa: F401
421

C
chentianyu03 已提交
422 423
from .autograd import grad  # noqa: F401
from .autograd import no_grad  # noqa: F401
424
from .autograd import enable_grad  # noqa:F401
C
chentianyu03 已提交
425
from .autograd import set_grad_enabled  # noqa: F401
W
wuhuanzhou 已提交
426
from .autograd import is_grad_enabled  # noqa: F401
427 428
from .framework import save  # noqa: F401
from .framework import load  # noqa: F401
Q
qizhaoaoe 已提交
429
from .distributed import DataParallel  # noqa: F401
430

431 432
from .framework import set_default_dtype  # noqa: F401
from .framework import get_default_dtype  # noqa: F401
433

434 435 436 437 438 439
from .tensor.search import index_sample  # noqa: F401
from .tensor.stat import mean  # noqa: F401
from .tensor.stat import std  # noqa: F401
from .tensor.stat import var  # noqa: F401
from .tensor.stat import numel  # noqa: F401
from .tensor.stat import median  # noqa: F401
440
from .tensor.stat import nanmedian  # noqa: F401
441
from .tensor.stat import quantile  # noqa: F401
442
from .tensor.stat import nanquantile  # noqa: F401
443 444 445 446
from .device import get_cudnn_version  # noqa: F401
from .device import set_device  # noqa: F401
from .device import get_device  # noqa: F401
from .device import is_compiled_with_xpu  # noqa: F401
J
jianghaicheng 已提交
447
from .device import is_compiled_with_ipu  # noqa: F401
Z
zhiboniu 已提交
448 449 450
from .device import is_compiled_with_cinn  # noqa: F401
from .device import is_compiled_with_cuda  # noqa: F401
from .device import is_compiled_with_rocm  # noqa: F401
451
from .device import is_compiled_with_custom_device  # noqa: F401
452

453
# high-level api
454
from .hapi import Model  # noqa: F401
Z
zhiboniu 已提交
455
from . import callbacks  # noqa: F401
456 457
from .hapi import summary  # noqa: F401
from .hapi import flops  # noqa: F401
Z
zhiboniu 已提交
458
from . import hub  # noqa: F401
459
from . import linalg  # noqa: F401
Z
zhiboniu 已提交
460
from . import fft  # noqa: F401
461
from . import signal  # noqa: F401
462

463 464
import paddle.text  # noqa: F401
import paddle.vision  # noqa: F401
P
pangyoki 已提交
465

466
from .tensor.random import check_shape  # noqa: F401
D
Difer 已提交
467
from .nn.initializer.lazy_init import LazyGuard  # noqa: F401

# CINN has to set a flag to include a lib
if is_compiled_with_cinn():
    import os

    _libs_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "libs"
    )
    # Only point CINN at the bundled headers when they actually shipped
    # with this package, and never override a user-provided setting.
    if os.path.exists(
        os.path.join(_libs_dir, "cinn_cuda_runtime_source.cuh")
    ):
        os.environ.setdefault('runtime_include_dir', _libs_dir)

# Paddle starts in dynamic-graph (imperative) mode by default.
disable_static()

from .new_ir_utils import _switch_to_new_ir  # noqa: F401

# NOTE(review): presumably switches to the new IR pipeline when the
# corresponding flag is enabled — confirm against new_ir_utils.
_switch_to_new_ir()

# Explicit public API of the top-level `paddle` namespace.
__all__ = [  # noqa
    'iinfo',
    'finfo',
    'dtype',
    'uint8',
    'int8',
    'int16',
    'int32',
    'int64',
    'float16',
    'float32',
    'float64',
    'bfloat16',
    'bool',
    'complex64',
    'complex128',
    'addmm',
    'addmm_',
    'allclose',
    'isclose',
    't',
    'add',
    'subtract',
    'diag',
    'diagflat',
    'isnan',
    'scatter_nd_add',
    'unstack',
    'get_default_dtype',
    'save',
    'multinomial',
    'get_cuda_rng_state',
    'get_rng_state',
    'rank',
    'empty_like',
    'eye',
    'cumsum',
    'cumsum_',
    'cummax',
    'cummin',
    'cumprod',
    'cumprod_',
    'logaddexp',
    'logcumsumexp',
    'logit',
    'logit_',
    'LazyGuard',
    'sign',
    'is_empty',
    'equal',
    'equal_',
    'equal_all',
    'is_tensor',
    'is_complex',
    'is_integer',
    'cross',
    'where',
    'where_',
    'log1p',
    'cos',
    'cos_',
    'tan',
    'tan_',
    'mean',
    'mode',
    'mv',
    'in_dynamic_mode',
    'min',
    'amin',
    'any',
    'slice',
    'normal',
    'logsumexp',
    'full',
    'unsqueeze',
    'unsqueeze_',
    'argmax',
    'Model',
    'summary',
    'flops',
    'sort',
    'searchsorted',
    'bucketize',
    'split',
    'vsplit',
    'logical_and',
    'logical_and_',
    'full_like',
    'less_than',
    'less_than_',
    'kron',
    'clip',
    'Tensor',
    'crop',
    'ParamAttr',
    'stanh',
    'randint',
    'randint_like',
    'assign',
    'gather',
    'scale',
    'zeros',
    'rsqrt',
    'squeeze',
    'squeeze_',
    'to_tensor',
    'gather_nd',
    'isinf',
    'uniform',
    'floor_divide',
    'floor_divide_',
    'remainder',
    'remainder_',
    'floor_mod',
    'floor_mod_',
    'roll',
    'batch',
    'max',
    'amax',
    'logical_or',
    'logical_or_',
    'bitwise_and',
    'bitwise_and_',
    'bitwise_or',
    'bitwise_or_',
    'bitwise_xor',
    'bitwise_xor_',
    'bitwise_not',
    'bitwise_not_',
    'mm',
    'flip',
    'rot90',
    'bincount',
    'histogram',
    'multiplex',
    'CUDAPlace',
    'empty',
    'shape',
    'real',
    'imag',
    'is_floating_point',
    'complex',
    'reciprocal',
    'rand',
    'less_equal',
    'less_equal_',
    'triu',
    'triu_',
    'sin',
    'sin_',
    'dist',
    'cdist',
    'unbind',
    'meshgrid',
    'arange',
    'load',
    'numel',
    'median',
    'nanmedian',
    'quantile',
    'nanquantile',
    'no_grad',
    'enable_grad',
    'set_grad_enabled',
    'is_grad_enabled',
    'mod',
    'mod_',
    'abs',
    'abs_',
    'tril',
    'tril_',
    'pow',
    'pow_',
    'zeros_like',
    'maximum',
    'topk',
    'index_select',
    'CPUPlace',
    'matmul',
    'seed',
    'acos',
    'acos_',
    'logical_xor',
    'exp',
    'expm1',
    'expm1_',
    'bernoulli',
    'poisson',
    'sinh',
    'sinh_',
    'round',
    'DataParallel',
    'argmin',
    'prod',
    'broadcast_shape',
    'conj',
    'neg',
    'neg_',
    'lgamma',
    'lgamma_',
    'lerp',
    'erfinv',
    'inner',
    'outer',
    'square',
    'square_',
    'divide',
    'divide_',
    'ceil',
    'atan',
    'atan_',
    'atan2',
    'rad2deg',
    'deg2rad',
    'gcd',
    'gcd_',
    'lcm',
    'lcm_',
    'expand',
    'broadcast_to',
    'ones_like',
    'index_sample',
    'cast',
    'cast_',
    'grad',
    'all',
    'ones',
    'not_equal',
    'sum',
    'nansum',
    'nanmean',
    'count_nonzero',
    'tile',
    'greater_equal',
    'greater_equal_',
    'isfinite',
    'create_parameter',
    'dot',
    'increment',
    'erf',
    'erf_',
    'bmm',
    'chunk',
    'tolist',
    'tensordot',
    'greater_than',
    'greater_than_',
    'shard_index',
    'argsort',
    'tanh',
    'tanh_',
    'transpose',
    'randn',
    'strided_slice',
    'unique',
    'unique_consecutive',
    'set_cuda_rng_state',
    'set_rng_state',
    'set_printoptions',
    'std',
    'flatten',
    'asin',
    'multiply',
    'multiply_',
    'disable_static',
    'masked_select',
    'var',
    'trace',
    'enable_static',
    'scatter_nd',
    'set_default_dtype',
    'disable_signal_handler',
    'expand_as',
    'stack',
    'sqrt',
    'randperm',
    'linspace',
    'logspace',
    'reshape',
    'reshape_',
    'reverse',
    'nonzero',
    'CUDAPinnedPlace',
    'logical_not',
    'logical_not_',
    'add_n',
    'minimum',
    'scatter',
    'scatter_',
    'floor',
    'cosh',
    'log',
    'log_',
    'log2',
    'log2_',
    'log10',
    'log10_',
    'concat',
    'check_shape',
    'trunc',
    'trunc_',
    'frac',
    'frac_',
    'digamma',
    'digamma_',
    'standard_normal',
    'diagonal',
    'broadcast_tensors',
    'einsum',
    'set_flags',
    'get_flags',
    'asinh',
    'acosh',
    'atanh',
    'as_complex',
    'as_real',
    'diff',
    'angle',
    'fmax',
    'fmin',
    'moveaxis',
    'repeat_interleave',
    'clone',
    'kthvalue',
    'renorm',
    'renorm_',
    'take_along_axis',
    'put_along_axis',
    'nan_to_num',
    'nan_to_num_',
    'heaviside',
    'tril_indices',
    'index_add',
    "index_add_",
    "index_put",
    "index_put_",
    'sgn',
    'triu_indices',
    'take',
    'frexp',
    'ldexp',
    'ldexp_',
    'trapezoid',
    'cumulative_trapezoid',
    'polar',
    'vander',
    'unflatten',
    'as_strided',
    'view',
    'view_as',
    'unfold',
    'nextafter',
    'i0',
    'i0_',
    'i0e',
    'i1',
    'i1e',
    'polygamma',
    'polygamma_',
]