# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Y
Yancey 已提交
14
try:
15 16
    from paddle.version import full_version as __version__
    from paddle.version import commit as __git_commit__
17
    from paddle.cuda_env import *  # noqa: F403
Y
Yancey 已提交
18 19
except ImportError:
    import sys
20 21 22

    sys.stderr.write(
        '''Warning with import paddle: you should not
Y
Yancey 已提交
23
     import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
24
    )
25

26
from .batch import batch  # noqa: F401
Z
zhiboniu 已提交
27 28
from .framework import monkey_patch_variable
from .framework import monkey_patch_math_varbase
29

30 31
monkey_patch_variable()
monkey_patch_math_varbase()
Z
zhiboniu 已提交
32 33 34 35 36 37 38 39

from .framework import disable_signal_handler  # noqa: F401
from .framework import get_flags  # noqa: F401
from .framework import set_flags  # noqa: F401

from .framework import disable_static  # noqa: F401
from .framework import enable_static  # noqa: F401
from .framework import in_dynamic_mode  # noqa: F401
40
from .fluid.dataset import *  # noqa: F401, F403
41
from .fluid.lazy_init import LazyGuard  # noqa: F401
Z
zhiboniu 已提交
42

43
from .framework.dtype import iinfo  # noqa: F401
44
from .framework.dtype import finfo  # noqa: F401
45
from .framework.dtype import dtype  # noqa: F401
Z
zhiboniu 已提交
46 47 48 49 50 51 52 53 54 55 56 57
from .framework.dtype import uint8  # noqa: F401
from .framework.dtype import int8  # noqa: F401
from .framework.dtype import int16  # noqa: F401
from .framework.dtype import int32  # noqa: F401
from .framework.dtype import int64  # noqa: F401
from .framework.dtype import float16  # noqa: F401
from .framework.dtype import float32  # noqa: F401
from .framework.dtype import float64  # noqa: F401
from .framework.dtype import bfloat16  # noqa: F401
from .framework.dtype import bool  # noqa: F401
from .framework.dtype import complex64  # noqa: F401
from .framework.dtype import complex128  # noqa: F401
58

W
wanghuancoder 已提交
59
Tensor = framework.core.eager.Tensor  # noqa: F401
60 61 62 63 64 65 66 67 68 69 70
Tensor.__qualname__ = 'Tensor'  # noqa: F401
import paddle.distributed  # noqa: F401
import paddle.sysconfig  # noqa: F401
import paddle.distribution  # noqa: F401
import paddle.nn  # noqa: F401
import paddle.distributed.fleet  # noqa: F401
import paddle.optimizer  # noqa: F401
import paddle.metric  # noqa: F401
import paddle.regularizer  # noqa: F401
import paddle.incubate  # noqa: F401
import paddle.autograd  # noqa: F401
71
import paddle.device  # noqa: F401
72

73 74 75 76 77 78 79 80 81
import paddle.jit  # noqa: F401
import paddle.amp  # noqa: F401
import paddle.dataset  # noqa: F401
import paddle.inference  # noqa: F401
import paddle.io  # noqa: F401
import paddle.onnx  # noqa: F401
import paddle.reader  # noqa: F401
import paddle.static  # noqa: F401
import paddle.vision  # noqa: F401
Y
YangZhou 已提交
82
import paddle.audio  # noqa: F401
83
import paddle.geometric  # noqa: F401
84
import paddle.sparse  # noqa: F401
85
import paddle.quantization  # noqa: F401
86

87 88
from .tensor.attribute import is_complex  # noqa: F401
from .tensor.attribute import is_integer  # noqa: F401
89 90 91 92
from .tensor.attribute import rank  # noqa: F401
from .tensor.attribute import shape  # noqa: F401
from .tensor.attribute import real  # noqa: F401
from .tensor.attribute import imag  # noqa: F401
W
wuhuanzhou 已提交
93
from .tensor.attribute import is_floating_point  # noqa: F401
94
from .tensor.creation import create_parameter  # noqa: F401
95 96
from .tensor.creation import to_tensor  # noqa: F401
from .tensor.creation import diag  # noqa: F401
L
Li Min 已提交
97
from .tensor.creation import diagflat  # noqa: F401
98 99
from .tensor.creation import eye  # noqa: F401
from .tensor.creation import linspace  # noqa: F401
100
from .tensor.creation import logspace  # noqa: F401
101 102 103 104 105 106 107 108 109 110 111 112 113
from .tensor.creation import ones  # noqa: F401
from .tensor.creation import ones_like  # noqa: F401
from .tensor.creation import zeros  # noqa: F401
from .tensor.creation import zeros_like  # noqa: F401
from .tensor.creation import arange  # noqa: F401
from .tensor.creation import full  # noqa: F401
from .tensor.creation import full_like  # noqa: F401
from .tensor.creation import triu  # noqa: F401
from .tensor.creation import tril  # noqa: F401
from .tensor.creation import meshgrid  # noqa: F401
from .tensor.creation import empty  # noqa: F401
from .tensor.creation import empty_like  # noqa: F401
from .tensor.creation import assign  # noqa: F401
F
Feiyu Chan 已提交
114
from .tensor.creation import complex  # noqa: F401
115
from .tensor.creation import clone  # noqa: F401
116 117
from .tensor.creation import tril_indices  # noqa: F401
from .tensor.creation import triu_indices  # noqa: F401
118
from .tensor.creation import polar  # noqa: F401
119 120 121 122 123 124 125 126 127 128
from .tensor.linalg import matmul  # noqa: F401
from .tensor.linalg import dot  # noqa: F401
from .tensor.linalg import norm  # noqa: F401
from .tensor.linalg import transpose  # noqa: F401
from .tensor.linalg import dist  # noqa: F401
from .tensor.linalg import t  # noqa: F401
from .tensor.linalg import cross  # noqa: F401
from .tensor.linalg import cholesky  # noqa: F401
from .tensor.linalg import bmm  # noqa: F401
from .tensor.linalg import histogram  # noqa: F401
S
smallv0221 已提交
129
from .tensor.linalg import bincount  # noqa: F401
130 131
from .tensor.linalg import mv  # noqa: F401
from .tensor.logic import equal  # noqa: F401
132
from .tensor.linalg import eigvalsh  # noqa: F401
133 134 135 136 137 138 139 140 141
from .tensor.logic import greater_equal  # noqa: F401
from .tensor.logic import greater_than  # noqa: F401
from .tensor.logic import is_empty  # noqa: F401
from .tensor.logic import less_equal  # noqa: F401
from .tensor.logic import less_than  # noqa: F401
from .tensor.logic import logical_and  # noqa: F401
from .tensor.logic import logical_not  # noqa: F401
from .tensor.logic import logical_or  # noqa: F401
from .tensor.logic import logical_xor  # noqa: F401
142 143 144 145
from .tensor.logic import bitwise_and  # noqa: F401
from .tensor.logic import bitwise_not  # noqa: F401
from .tensor.logic import bitwise_or  # noqa: F401
from .tensor.logic import bitwise_xor  # noqa: F401
146 147
from .tensor.logic import not_equal  # noqa: F401
from .tensor.logic import allclose  # noqa: F401
A
andyjpaddle 已提交
148
from .tensor.logic import isclose  # noqa: F401
149 150 151 152
from .tensor.logic import equal_all  # noqa: F401
from .tensor.logic import is_tensor  # noqa: F401
from .tensor.manipulation import cast  # noqa: F401
from .tensor.manipulation import concat  # noqa: F401
153
from .tensor.manipulation import broadcast_tensors  # noqa: F401
154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169
from .tensor.manipulation import expand  # noqa: F401
from .tensor.manipulation import broadcast_to  # noqa: F401
from .tensor.manipulation import expand_as  # noqa: F401
from .tensor.manipulation import tile  # noqa: F401
from .tensor.manipulation import flatten  # noqa: F401
from .tensor.manipulation import gather  # noqa: F401
from .tensor.manipulation import gather_nd  # noqa: F401
from .tensor.manipulation import reshape  # noqa: F401
from .tensor.manipulation import reshape_  # noqa: F401
from .tensor.manipulation import flip as reverse  # noqa: F401
from .tensor.manipulation import scatter  # noqa: F401
from .tensor.manipulation import scatter_  # noqa: F401
from .tensor.manipulation import scatter_nd_add  # noqa: F401
from .tensor.manipulation import scatter_nd  # noqa: F401
from .tensor.manipulation import shard_index  # noqa: F401
from .tensor.manipulation import slice  # noqa: F401
Z
zhiboniu 已提交
170
from .tensor.manipulation import crop  # noqa: F401
171
from .tensor.manipulation import split  # noqa: F401
172
from .tensor.manipulation import vsplit  # noqa: F401
173 174 175 176 177
from .tensor.manipulation import squeeze  # noqa: F401
from .tensor.manipulation import squeeze_  # noqa: F401
from .tensor.manipulation import stack  # noqa: F401
from .tensor.manipulation import strided_slice  # noqa: F401
from .tensor.manipulation import unique  # noqa: F401
D
duanboqiang 已提交
178
from .tensor.manipulation import unique_consecutive  # noqa: F401
179 180 181 182
from .tensor.manipulation import unsqueeze  # noqa: F401
from .tensor.manipulation import unsqueeze_  # noqa: F401
from .tensor.manipulation import unstack  # noqa: F401
from .tensor.manipulation import flip  # noqa: F401
Z
zmxdream 已提交
183
from .tensor.manipulation import rot90  # noqa: F401
184 185 186 187
from .tensor.manipulation import unbind  # noqa: F401
from .tensor.manipulation import roll  # noqa: F401
from .tensor.manipulation import chunk  # noqa: F401
from .tensor.manipulation import tolist  # noqa: F401
188
from .tensor.manipulation import take_along_axis  # noqa: F401
189
from .tensor.manipulation import put_along_axis  # noqa: F401
F
From00 已提交
190
from .tensor.manipulation import tensordot  # noqa: F401
191 192
from .tensor.manipulation import as_complex  # noqa: F401
from .tensor.manipulation import as_real  # noqa: F401
193
from .tensor.manipulation import moveaxis  # noqa: F401
K
kuizhiqing 已提交
194
from .tensor.manipulation import repeat_interleave  # noqa: F401
L
Li Min 已提交
195 196
from .tensor.manipulation import index_add  # noqa: F401
from .tensor.manipulation import index_add_  # noqa: F401
197 198 199 200
from .tensor.math import abs  # noqa: F401
from .tensor.math import acos  # noqa: F401
from .tensor.math import asin  # noqa: F401
from .tensor.math import atan  # noqa: F401
R
ronnywang 已提交
201
from .tensor.math import atan2  # noqa: F401
202 203 204 205 206
from .tensor.math import ceil  # noqa: F401
from .tensor.math import cos  # noqa: F401
from .tensor.math import tan  # noqa: F401
from .tensor.math import cosh  # noqa: F401
from .tensor.math import cumsum  # noqa: F401
H
hlygit66666 已提交
207
from .tensor.math import cumprod  # noqa: F401
208
from .tensor.math import logcumsumexp  # noqa: F401
W
wangzhen38 已提交
209
from .tensor.math import logit  # noqa: F401
210
from .tensor.math import exp  # noqa: F401
R
ronnywang 已提交
211
from .tensor.math import expm1  # noqa: F401
212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231
from .tensor.math import floor  # noqa: F401
from .tensor.math import increment  # noqa: F401
from .tensor.math import log  # noqa: F401
from .tensor.math import log2  # noqa: F401
from .tensor.math import log10  # noqa: F401
from .tensor.math import multiplex  # noqa: F401
from .tensor.math import pow  # noqa: F401
from .tensor.math import reciprocal  # noqa: F401
from .tensor.math import all  # noqa: F401
from .tensor.math import any  # noqa: F401
from .tensor.math import round  # noqa: F401
from .tensor.math import rsqrt  # noqa: F401
from .tensor.math import scale  # noqa: F401
from .tensor.math import sign  # noqa: F401
from .tensor.math import sin  # noqa: F401
from .tensor.math import sinh  # noqa: F401
from .tensor.math import sqrt  # noqa: F401
from .tensor.math import square  # noqa: F401
from .tensor.math import stanh  # noqa: F401
from .tensor.math import sum  # noqa: F401
232
from .tensor.math import nan_to_num  # noqa: F401
W
wangguanqun 已提交
233
from .tensor.math import nansum  # noqa: F401
234
from .tensor.math import nanmean  # noqa: F401
235
from .tensor.math import count_nonzero  # noqa: F401
236 237 238 239 240
from .tensor.math import tanh  # noqa: F401
from .tensor.math import tanh_  # noqa: F401
from .tensor.math import add_n  # noqa: F401
from .tensor.math import max  # noqa: F401
from .tensor.math import maximum  # noqa: F401
T
Tao Luo 已提交
241
from .tensor.math import amax  # noqa: F401
242 243
from .tensor.math import min  # noqa: F401
from .tensor.math import minimum  # noqa: F401
T
Tao Luo 已提交
244
from .tensor.math import amin  # noqa: F401
245 246 247 248
from .tensor.math import mm  # noqa: F401
from .tensor.math import divide  # noqa: F401
from .tensor.math import floor_divide  # noqa: F401
from .tensor.math import remainder  # noqa: F401
249
from .tensor.math import remainder_  # noqa: F401
250 251 252
from .tensor.math import mod  # noqa: F401
from .tensor.math import floor_mod  # noqa: F401
from .tensor.math import multiply  # noqa: F401
S
seemingwang 已提交
253
from .tensor.math import renorm  # noqa: F401
254 255 256 257 258 259 260 261 262
from .tensor.math import add  # noqa: F401
from .tensor.math import subtract  # noqa: F401
from .tensor.math import logsumexp  # noqa: F401
from .tensor.math import inverse  # noqa: F401
from .tensor.math import log1p  # noqa: F401
from .tensor.math import erf  # noqa: F401
from .tensor.math import addmm  # noqa: F401
from .tensor.math import clip  # noqa: F401
from .tensor.math import trace  # noqa: F401
263
from .tensor.math import diagonal  # noqa: F401
264 265 266 267 268 269 270
from .tensor.math import kron  # noqa: F401
from .tensor.math import isfinite  # noqa: F401
from .tensor.math import isinf  # noqa: F401
from .tensor.math import isnan  # noqa: F401
from .tensor.math import prod  # noqa: F401
from .tensor.math import broadcast_shape  # noqa: F401
from .tensor.math import conj  # noqa: F401
271
from .tensor.math import trunc  # noqa: F401
Z
zyfncg 已提交
272
from .tensor.math import digamma  # noqa: F401
273 274
from .tensor.math import neg  # noqa: F401
from .tensor.math import lgamma  # noqa: F401
X
xiaoting 已提交
275 276 277
from .tensor.math import acosh  # noqa: F401
from .tensor.math import asinh  # noqa: F401
from .tensor.math import atanh  # noqa: F401
278
from .tensor.math import lerp  # noqa: F401
W
wuhuanzhou 已提交
279
from .tensor.math import erfinv  # noqa: F401
280 281
from .tensor.math import rad2deg  # noqa: F401
from .tensor.math import deg2rad  # noqa: F401
T
Tao Luo 已提交
282 283
from .tensor.math import gcd  # noqa: F401
from .tensor.math import lcm  # noqa: F401
A
andyjpaddle 已提交
284
from .tensor.math import diff  # noqa: F401
F
Feiyu Chan 已提交
285
from .tensor.math import angle  # noqa: F401
L
LJQ❤️ 已提交
286 287
from .tensor.math import fmax  # noqa: F401
from .tensor.math import fmin  # noqa: F401
Z
zhiboniu 已提交
288 289
from .tensor.math import inner  # noqa: F401
from .tensor.math import outer  # noqa: F401
290
from .tensor.math import heaviside  # noqa: F401
291
from .tensor.math import frac  # noqa: F401
292
from .tensor.math import sgn  # noqa: F401
293
from .tensor.math import take  # noqa: F401
294
from .tensor.math import frexp  # noqa: F401
295 296
from .tensor.math import trapezoid  # noqa: F401
from .tensor.math import cumulative_trapezoid  # noqa: F401
297
from .tensor.math import vander  # noqa: F401
L
Leo Chen 已提交
298

299 300
from .tensor.random import bernoulli  # noqa: F401
from .tensor.random import poisson  # noqa: F401
301 302 303 304 305 306 307
from .tensor.random import multinomial  # noqa: F401
from .tensor.random import standard_normal  # noqa: F401
from .tensor.random import normal  # noqa: F401
from .tensor.random import uniform  # noqa: F401
from .tensor.random import randn  # noqa: F401
from .tensor.random import rand  # noqa: F401
from .tensor.random import randint  # noqa: F401
308
from .tensor.random import randint_like  # noqa: F401
309 310 311 312
from .tensor.random import randperm  # noqa: F401
from .tensor.search import argmax  # noqa: F401
from .tensor.search import argmin  # noqa: F401
from .tensor.search import argsort  # noqa: F401
Y
Yanxing Shi 已提交
313
from .tensor.search import searchsorted  # noqa: F401
314
from .tensor.search import bucketize  # noqa: F401
315 316 317 318 319 320
from .tensor.search import masked_select  # noqa: F401
from .tensor.search import topk  # noqa: F401
from .tensor.search import where  # noqa: F401
from .tensor.search import index_select  # noqa: F401
from .tensor.search import nonzero  # noqa: F401
from .tensor.search import sort  # noqa: F401
321
from .tensor.search import kthvalue  # noqa: F401
322
from .tensor.search import mode  # noqa: F401
323

324
from .tensor.to_string import set_printoptions  # noqa: F401
325

T
Tongxin Bai 已提交
326 327
from .tensor.einsum import einsum  # noqa: F401

328 329 330
from .framework.random import seed  # noqa: F401
from .framework.random import get_cuda_rng_state  # noqa: F401
from .framework.random import set_cuda_rng_state  # noqa: F401
Q
QingshuChen 已提交
331 332
from .framework.random import get_rng_state  # noqa: F401
from .framework.random import set_rng_state  # noqa: F401
333 334
from .framework import ParamAttr  # noqa: F401
from .framework import CPUPlace  # noqa: F401
J
jianghaicheng 已提交
335
from .framework import IPUPlace  # noqa: F401
336 337
from .framework import CUDAPlace  # noqa: F401
from .framework import CUDAPinnedPlace  # noqa: F401
338
from .framework import CustomPlace  # noqa: F401
339

C
chentianyu03 已提交
340 341
from .autograd import grad  # noqa: F401
from .autograd import no_grad  # noqa: F401
342
from .autograd import enable_grad  # noqa:F401
C
chentianyu03 已提交
343
from .autograd import set_grad_enabled  # noqa: F401
W
wuhuanzhou 已提交
344
from .autograd import is_grad_enabled  # noqa: F401
345 346
from .framework import save  # noqa: F401
from .framework import load  # noqa: F401
Q
qizhaoaoe 已提交
347
from .distributed import DataParallel  # noqa: F401
348

349 350
from .framework import set_default_dtype  # noqa: F401
from .framework import get_default_dtype  # noqa: F401
351

352 353 354 355 356 357
from .tensor.search import index_sample  # noqa: F401
from .tensor.stat import mean  # noqa: F401
from .tensor.stat import std  # noqa: F401
from .tensor.stat import var  # noqa: F401
from .tensor.stat import numel  # noqa: F401
from .tensor.stat import median  # noqa: F401
358
from .tensor.stat import nanmedian  # noqa: F401
359
from .tensor.stat import quantile  # noqa: F401
360
from .tensor.stat import nanquantile  # noqa: F401
361 362 363 364
from .device import get_cudnn_version  # noqa: F401
from .device import set_device  # noqa: F401
from .device import get_device  # noqa: F401
from .device import is_compiled_with_xpu  # noqa: F401
J
jianghaicheng 已提交
365
from .device import is_compiled_with_ipu  # noqa: F401
Z
zhiboniu 已提交
366 367 368
from .device import is_compiled_with_cinn  # noqa: F401
from .device import is_compiled_with_cuda  # noqa: F401
from .device import is_compiled_with_rocm  # noqa: F401
369
from .device import is_compiled_with_custom_device  # noqa: F401
370
from .device import XPUPlace  # noqa: F401
371

372
# high-level api
373
from .hapi import Model  # noqa: F401
Z
zhiboniu 已提交
374
from . import callbacks  # noqa: F401
375 376
from .hapi import summary  # noqa: F401
from .hapi import flops  # noqa: F401
Z
zhiboniu 已提交
377
from . import hub  # noqa: F401
378
from . import linalg  # noqa: F401
Z
zhiboniu 已提交
379
from . import fft  # noqa: F401
380
from . import signal  # noqa: F401
381

382 383
import paddle.text  # noqa: F401
import paddle.vision  # noqa: F401
P
pangyoki 已提交
384

385
from .tensor.random import check_shape  # noqa: F401
386 387 388 389

# CINN has to set a flag to include a lib
if is_compiled_with_cinn():
    import os
390

391 392 393 394
    package_dir = os.path.dirname(os.path.abspath(__file__))
    runtime_include_dir = os.path.join(package_dir, "libs")
    cuh_file = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
    if os.path.exists(cuh_file):
395
        os.environ.setdefault('runtime_include_dir', runtime_include_dir)
396

P
pangyoki 已提交
397
disable_static()
398
__all__ = [  # noqa
399
    'iinfo',
400
    'finfo',
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428
    'dtype',
    'uint8',
    'int8',
    'int16',
    'int32',
    'int64',
    'float16',
    'float32',
    'float64',
    'bfloat16',
    'bool',
    'complex64',
    'complex128',
    'addmm',
    'allclose',
    'isclose',
    't',
    'add',
    'subtract',
    'diag',
    'diagflat',
    'isnan',
    'scatter_nd_add',
    'unstack',
    'get_default_dtype',
    'save',
    'multinomial',
    'get_cuda_rng_state',
Q
QingshuChen 已提交
429
    'get_rng_state',
430 431 432 433 434
    'rank',
    'empty_like',
    'eye',
    'cumsum',
    'cumprod',
435
    'logcumsumexp',
436
    'logit',
437
    'LazyGuard',
438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468
    'sign',
    'is_empty',
    'equal',
    'equal_all',
    'is_tensor',
    'is_complex',
    'is_integer',
    'cross',
    'where',
    'log1p',
    'cos',
    'tan',
    'mean',
    'mode',
    'mv',
    'in_dynamic_mode',
    'min',
    'amin',
    'any',
    'slice',
    'normal',
    'logsumexp',
    'full',
    'unsqueeze',
    'unsqueeze_',
    'argmax',
    'Model',
    'summary',
    'flops',
    'sort',
    'searchsorted',
469
    'bucketize',
470
    'split',
471
    'vsplit',
472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534
    'logical_and',
    'full_like',
    'less_than',
    'kron',
    'clip',
    'Tensor',
    'crop',
    'ParamAttr',
    'stanh',
    'randint',
    'randint_like',
    'assign',
    'gather',
    'scale',
    'zeros',
    'rsqrt',
    'squeeze',
    'squeeze_',
    'to_tensor',
    'gather_nd',
    'isinf',
    'uniform',
    'floor_divide',
    'remainder',
    'floor_mod',
    'roll',
    'batch',
    'max',
    'amax',
    'logical_or',
    'bitwise_and',
    'bitwise_or',
    'bitwise_xor',
    'bitwise_not',
    'mm',
    'flip',
    'rot90',
    'bincount',
    'histogram',
    'multiplex',
    'CUDAPlace',
    'empty',
    'shape',
    'real',
    'imag',
    'is_floating_point',
    'complex',
    'reciprocal',
    'rand',
    'less_equal',
    'triu',
    'sin',
    'dist',
    'unbind',
    'meshgrid',
    'arange',
    'load',
    'numel',
    'median',
    'nanmedian',
    'quantile',
    'nanquantile',
    'no_grad',
H
Hui Zhang 已提交
535
    'enable_grad',
536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588
    'set_grad_enabled',
    'is_grad_enabled',
    'mod',
    'abs',
    'tril',
    'pow',
    'zeros_like',
    'maximum',
    'topk',
    'index_select',
    'CPUPlace',
    'matmul',
    'seed',
    'acos',
    'logical_xor',
    'exp',
    'expm1',
    'bernoulli',
    'poisson',
    'sinh',
    'round',
    'DataParallel',
    'argmin',
    'prod',
    'broadcast_shape',
    'conj',
    'neg',
    'lgamma',
    'lerp',
    'erfinv',
    'inner',
    'outer',
    'square',
    'divide',
    'ceil',
    'atan',
    'atan2',
    'rad2deg',
    'deg2rad',
    'gcd',
    'lcm',
    'expand',
    'broadcast_to',
    'ones_like',
    'index_sample',
    'cast',
    'grad',
    'all',
    'ones',
    'not_equal',
    'sum',
    'nansum',
    'nanmean',
589
    'count_nonzero',
590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611
    'tile',
    'greater_equal',
    'isfinite',
    'create_parameter',
    'dot',
    'increment',
    'erf',
    'bmm',
    'chunk',
    'tolist',
    'tensordot',
    'greater_than',
    'shard_index',
    'argsort',
    'tanh',
    'tanh_',
    'transpose',
    'randn',
    'strided_slice',
    'unique',
    'unique_consecutive',
    'set_cuda_rng_state',
Q
QingshuChen 已提交
612
    'set_rng_state',
613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673
    'set_printoptions',
    'std',
    'flatten',
    'asin',
    'multiply',
    'disable_static',
    'masked_select',
    'var',
    'trace',
    'enable_static',
    'scatter_nd',
    'set_default_dtype',
    'disable_signal_handler',
    'expand_as',
    'stack',
    'sqrt',
    'randperm',
    'linspace',
    'logspace',
    'reshape',
    'reshape_',
    'reverse',
    'nonzero',
    'CUDAPinnedPlace',
    'logical_not',
    'add_n',
    'minimum',
    'scatter',
    'scatter_',
    'floor',
    'cosh',
    'log',
    'log2',
    'log10',
    'concat',
    'check_shape',
    'trunc',
    'frac',
    'digamma',
    'standard_normal',
    'diagonal',
    'broadcast_tensors',
    'einsum',
    'set_flags',
    'get_flags',
    'asinh',
    'acosh',
    'atanh',
    'as_complex',
    'as_real',
    'diff',
    'angle',
    'fmax',
    'fmin',
    'moveaxis',
    'repeat_interleave',
    'clone',
    'kthvalue',
    'renorm',
    'take_along_axis',
    'put_along_axis',
674
    'nan_to_num',
675 676
    'heaviside',
    'tril_indices',
L
Li Min 已提交
677 678
    'index_add',
    "index_add_",
679
    'sgn',
680
    'triu_indices',
681
    'take',
682
    'frexp',
683 684
    'trapezoid',
    'cumulative_trapezoid',
685
    'polar',
686
    'vander',
687
]