# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Top-level paddle package: re-exports the public API from the internal
# subpackages and performs one-time initialization (monkey patching and
# enabling dynamic-graph mode by default).

# The version module only exists in an installed wheel; importing paddle from
# a source checkout raises ImportError, which we turn into a clear warning.
try:
    from paddle.version import full_version as __version__
    from paddle.version import commit as __git_commit__
    from paddle.cuda_env import *
except ImportError:
    import sys
    sys.stderr.write('''Warning with import paddle: you should not
     import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
                     )

from .batch import batch  # noqa: F401
# Patch Variable/VarBase so Python operators (+, -, *, ...) work on tensors.
from .fluid import monkey_patch_variable
from .fluid.dygraph import monkey_patch_math_varbase
monkey_patch_variable()
monkey_patch_math_varbase()
from .framework.dtype import dtype as dtype  # noqa: F401
from paddle.framework.dtype import uint8  # noqa: F401
from paddle.framework.dtype import int8  # noqa: F401
from paddle.framework.dtype import int16  # noqa: F401
from paddle.framework.dtype import int32  # noqa: F401
from paddle.framework.dtype import int64  # noqa: F401
from paddle.framework.dtype import float16  # noqa: F401
from paddle.framework.dtype import float32  # noqa: F401
from paddle.framework.dtype import float64  # noqa: F401
from paddle.framework.dtype import bfloat16  # noqa: F401
from paddle.framework.dtype import bool  # noqa: F401
from paddle.framework.dtype import complex64  # noqa: F401
from paddle.framework.dtype import complex128  # noqa: F401
# Expose the dygraph VarBase under the public name paddle.Tensor.
from .framework import VarBase as Tensor  # noqa: F401
Tensor.__qualname__ = 'Tensor'  # noqa: F401
import paddle.compat  # noqa: F401
import paddle.distributed  # noqa: F401
import paddle.sysconfig  # noqa: F401
import paddle.distribution  # noqa: F401
import paddle.nn  # noqa: F401
import paddle.distributed.fleet  # noqa: F401
import paddle.optimizer  # noqa: F401
import paddle.metric  # noqa: F401
import paddle.regularizer  # noqa: F401
import paddle.incubate  # noqa: F401
import paddle.autograd  # noqa: F401
import paddle.device  # noqa: F401

import paddle.jit  # noqa: F401
import paddle.amp  # noqa: F401
import paddle.dataset  # noqa: F401
import paddle.inference  # noqa: F401
import paddle.io  # noqa: F401
import paddle.onnx  # noqa: F401
import paddle.reader  # noqa: F401
import paddle.static  # noqa: F401
import paddle.vision  # noqa: F401

from .tensor import fft
from .tensor.random import bernoulli  # noqa: F401

from .tensor.attribute import rank  # noqa: F401
from .tensor.attribute import shape  # noqa: F401
from .tensor.attribute import real  # noqa: F401
from .tensor.attribute import imag  # noqa: F401
from .tensor.creation import to_tensor  # noqa: F401
from .tensor.creation import diag  # noqa: F401
from .tensor.creation import diagflat  # noqa: F401
from .tensor.creation import eye  # noqa: F401
from .tensor.creation import linspace  # noqa: F401
from .tensor.creation import ones  # noqa: F401
from .tensor.creation import ones_like  # noqa: F401
from .tensor.creation import zeros  # noqa: F401
from .tensor.creation import zeros_like  # noqa: F401
from .tensor.creation import arange  # noqa: F401
from .tensor.creation import full  # noqa: F401
from .tensor.creation import full_like  # noqa: F401
from .tensor.creation import triu  # noqa: F401
from .tensor.creation import tril  # noqa: F401
from .tensor.creation import meshgrid  # noqa: F401
from .tensor.creation import empty  # noqa: F401
from .tensor.creation import empty_like  # noqa: F401
from .tensor.creation import assign  # noqa: F401
from .tensor.linalg import matmul  # noqa: F401
from .tensor.linalg import dot  # noqa: F401
from .tensor.linalg import norm  # noqa: F401
from .tensor.linalg import transpose  # noqa: F401
from .tensor.linalg import dist  # noqa: F401
from .tensor.linalg import cond  # noqa: F401
from .tensor.linalg import t  # noqa: F401
from .tensor.linalg import cross  # noqa: F401
from .tensor.linalg import cholesky  # noqa: F401
from .tensor.linalg import bmm  # noqa: F401
from .tensor.linalg import histogram  # noqa: F401
from .tensor.linalg import mv  # noqa: F401
from .tensor.linalg import det  # noqa: F401
from .tensor.linalg import slogdet  # noqa: F401
from .tensor.linalg import multi_dot  # noqa: F401
from .tensor.linalg import matrix_power  # noqa: F401
from .tensor.linalg import svd  # noqa: F401
from .tensor.linalg import eigh  # noqa: F401
from .tensor.linalg import pinv  # noqa: F401
from .tensor.logic import equal  # noqa: F401
from .tensor.logic import greater_equal  # noqa: F401
from .tensor.logic import greater_than  # noqa: F401
from .tensor.logic import is_empty  # noqa: F401
from .tensor.logic import less_equal  # noqa: F401
from .tensor.logic import less_than  # noqa: F401
from .tensor.logic import logical_and  # noqa: F401
from .tensor.logic import logical_not  # noqa: F401
from .tensor.logic import logical_or  # noqa: F401
from .tensor.logic import logical_xor  # noqa: F401
from .tensor.logic import bitwise_and  # noqa: F401
from .tensor.logic import bitwise_not  # noqa: F401
from .tensor.logic import bitwise_or  # noqa: F401
from .tensor.logic import bitwise_xor  # noqa: F401
from .tensor.logic import not_equal  # noqa: F401
from .tensor.logic import allclose  # noqa: F401
from .tensor.logic import equal_all  # noqa: F401
from .tensor.logic import is_tensor  # noqa: F401
from .tensor.manipulation import cast  # noqa: F401
from .tensor.manipulation import concat  # noqa: F401
from .tensor.manipulation import broadcast_tensors  # noqa: F401
from .tensor.manipulation import expand  # noqa: F401
from .tensor.manipulation import broadcast_to  # noqa: F401
from .tensor.manipulation import expand_as  # noqa: F401
from .tensor.manipulation import tile  # noqa: F401
from .tensor.manipulation import flatten  # noqa: F401
from .tensor.manipulation import gather  # noqa: F401
from .tensor.manipulation import gather_nd  # noqa: F401
from .tensor.manipulation import reshape  # noqa: F401
from .tensor.manipulation import reshape_  # noqa: F401
from .tensor.manipulation import flip as reverse  # noqa: F401
from .tensor.manipulation import scatter  # noqa: F401
from .tensor.manipulation import scatter_  # noqa: F401
from .tensor.manipulation import scatter_nd_add  # noqa: F401
from .tensor.manipulation import scatter_nd  # noqa: F401
from .tensor.manipulation import shard_index  # noqa: F401
from .tensor.manipulation import slice  # noqa: F401
from .tensor.manipulation import split  # noqa: F401
from .tensor.manipulation import squeeze  # noqa: F401
from .tensor.manipulation import squeeze_  # noqa: F401
from .tensor.manipulation import stack  # noqa: F401
from .tensor.manipulation import strided_slice  # noqa: F401
from .tensor.manipulation import unique  # noqa: F401
from .tensor.manipulation import unique_consecutive  # noqa: F401
from .tensor.manipulation import unsqueeze  # noqa: F401
from .tensor.manipulation import unsqueeze_  # noqa: F401
from .tensor.manipulation import unstack  # noqa: F401
from .tensor.manipulation import flip  # noqa: F401
from .tensor.manipulation import unbind  # noqa: F401
from .tensor.manipulation import roll  # noqa: F401
from .tensor.manipulation import chunk  # noqa: F401
from .tensor.manipulation import tolist  # noqa: F401
from .tensor.math import abs  # noqa: F401
from .tensor.math import acos  # noqa: F401
from .tensor.math import asin  # noqa: F401
from .tensor.math import atan  # noqa: F401
from .tensor.math import atan2  # noqa: F401
from .tensor.math import ceil  # noqa: F401
from .tensor.math import cos  # noqa: F401
from .tensor.math import tan  # noqa: F401
from .tensor.math import cosh  # noqa: F401
from .tensor.math import cumsum  # noqa: F401
from .tensor.math import cumprod  # noqa: F401
from .tensor.math import exp  # noqa: F401
from .tensor.math import expm1  # noqa: F401
from .tensor.math import floor  # noqa: F401
from .tensor.math import increment  # noqa: F401
from .tensor.math import log  # noqa: F401
from .tensor.math import log2  # noqa: F401
from .tensor.math import log10  # noqa: F401
from .tensor.math import multiplex  # noqa: F401
from .tensor.math import pow  # noqa: F401
from .tensor.math import reciprocal  # noqa: F401
from .tensor.math import all  # noqa: F401
from .tensor.math import any  # noqa: F401
from .tensor.math import round  # noqa: F401
from .tensor.math import rsqrt  # noqa: F401
from .tensor.math import scale  # noqa: F401
from .tensor.math import sign  # noqa: F401
from .tensor.math import sin  # noqa: F401
from .tensor.math import sinh  # noqa: F401
from .tensor.math import sqrt  # noqa: F401
from .tensor.math import square  # noqa: F401
from .tensor.math import stanh  # noqa: F401
from .tensor.math import sum  # noqa: F401
from .tensor.math import tanh  # noqa: F401
from .tensor.math import tanh_  # noqa: F401
from .tensor.math import add_n  # noqa: F401
from .tensor.math import max  # noqa: F401
from .tensor.math import maximum  # noqa: F401
from .tensor.math import min  # noqa: F401
from .tensor.math import minimum  # noqa: F401
from .tensor.math import mm  # noqa: F401
from .tensor.math import divide  # noqa: F401
from .tensor.math import floor_divide  # noqa: F401
from .tensor.math import remainder  # noqa: F401
from .tensor.math import mod  # noqa: F401
from .tensor.math import floor_mod  # noqa: F401
from .tensor.math import multiply  # noqa: F401
from .tensor.math import add  # noqa: F401
from .tensor.math import subtract  # noqa: F401
from .tensor.math import logsumexp  # noqa: F401
from .tensor.math import inverse  # noqa: F401
from .tensor.math import log1p  # noqa: F401
from .tensor.math import erf  # noqa: F401
from .tensor.math import addmm  # noqa: F401
from .tensor.math import clip  # noqa: F401
from .tensor.math import trace  # noqa: F401
from .tensor.math import diagonal  # noqa: F401
from .tensor.math import kron  # noqa: F401
from .tensor.math import isfinite  # noqa: F401
from .tensor.math import isinf  # noqa: F401
from .tensor.math import isnan  # noqa: F401
from .tensor.math import prod  # noqa: F401
from .tensor.math import broadcast_shape  # noqa: F401
from .tensor.math import conj  # noqa: F401
from .tensor.math import trunc  # noqa: F401
from .tensor.math import digamma  # noqa: F401
from .tensor.math import neg  # noqa: F401
from .tensor.math import lgamma  # noqa: F401

from .tensor.random import multinomial  # noqa: F401
from .tensor.random import standard_normal  # noqa: F401
from .tensor.random import normal  # noqa: F401
from .tensor.random import uniform  # noqa: F401
from .tensor.random import randn  # noqa: F401
from .tensor.random import rand  # noqa: F401
from .tensor.random import randint  # noqa: F401
from .tensor.random import randperm  # noqa: F401
from .tensor.search import argmax  # noqa: F401
from .tensor.search import argmin  # noqa: F401
from .tensor.search import argsort  # noqa: F401
from .tensor.search import searchsorted  # noqa: F401
from .tensor.search import masked_select  # noqa: F401
from .tensor.search import topk  # noqa: F401
from .tensor.search import where  # noqa: F401
from .tensor.search import index_select  # noqa: F401
from .tensor.search import nonzero  # noqa: F401
from .tensor.search import sort  # noqa: F401

from .tensor.to_string import set_printoptions  # noqa: F401

from .tensor.einsum import einsum  # noqa: F401

from .framework.random import seed  # noqa: F401
from .framework.random import get_cuda_rng_state  # noqa: F401
from .framework.random import set_cuda_rng_state  # noqa: F401
from .framework import ParamAttr  # noqa: F401
from .framework import create_parameter  # noqa: F401
from .framework import CPUPlace  # noqa: F401
from .framework import CUDAPlace  # noqa: F401
from .framework import NPUPlace  # noqa: F401
from .framework import CUDAPinnedPlace  # noqa: F401

from .autograd import grad  # noqa: F401
from .autograd import no_grad  # noqa: F401
from .autograd import set_grad_enabled  # noqa: F401
from .framework import save  # noqa: F401
from .framework import load  # noqa: F401
from .framework import DataParallel  # noqa: F401

from .framework import set_default_dtype  # noqa: F401
from .framework import get_default_dtype  # noqa: F401

from .tensor.search import index_sample  # noqa: F401
from .tensor.stat import mean  # noqa: F401
from .tensor.stat import std  # noqa: F401
from .tensor.stat import var  # noqa: F401
from .tensor.stat import numel  # noqa: F401
from .tensor.stat import median  # noqa: F401
from .device import get_cudnn_version  # noqa: F401
from .device import set_device  # noqa: F401
from .device import get_device  # noqa: F401
from .fluid.framework import is_compiled_with_cuda  # noqa: F401
from .fluid.framework import is_compiled_with_rocm  # noqa: F401
from .fluid.framework import disable_signal_handler  # noqa: F401
from .fluid.framework import get_flags  # noqa: F401
from .fluid.framework import set_flags  # noqa: F401
from .device import is_compiled_with_xpu  # noqa: F401
from .device import is_compiled_with_npu  # noqa: F401
from .device import XPUPlace  # noqa: F401

# Note the intentional cross-naming: enabling dygraph == disabling static mode.
from .fluid.dygraph.base import enable_dygraph as disable_static  # noqa: F401
from .fluid.dygraph.base import disable_dygraph as enable_static  # noqa: F401
from .fluid.framework import in_dygraph_mode as in_dynamic_mode  # noqa: F401
from .fluid.layers import crop_tensor as crop  # noqa: F401

# high-level api
from .hapi import Model  # noqa: F401
from . import callbacks  # noqa: F401
from .hapi import summary  # noqa: F401
from .hapi import flops  # noqa: F401
from . import hub  # noqa: F401
from . import linalg  # noqa: F401

import paddle.text  # noqa: F401
import paddle.vision  # noqa: F401

from .tensor.random import check_shape  # noqa: F401

# Dynamic-graph (imperative) mode is the default execution mode.
disable_static()

__all__ = [  # noqa
    'dtype',
    'uint8',
    'int8',
    'int16',
    'int32',
    'int64',
    'float16',
    'float32',
    'float64',
    'bfloat16',
    'bool',
    'complex64',
    'complex128',
    'addmm',
    'allclose',
    't',
    'add',
    'subtract',
    'diag',
    'diagflat',
    'isnan',
    'scatter_nd_add',
    'unstack',
    'get_default_dtype',
    'save',
    'multinomial',
    'get_cuda_rng_state',
    'rank',
    'empty_like',
    'eye',
    'cumsum',
    'cumprod',
    'sign',
    'is_empty',
    'equal',
    'equal_all',
    'is_tensor',
    'cross',
    'where',
    'log1p',
    'cos',
    'tan',
    'mean',
    'mv',
    'in_dynamic_mode',
    'min',
    'any',
    'slice',
    'normal',
    'logsumexp',
    'full',
    'unsqueeze',
    'unsqueeze_',
    'argmax',
    'Model',
    'summary',
    'flops',
    'sort',
    'searchsorted',
    'split',
    'logical_and',
    'full_like',
    'less_than',
    'kron',
    'clip',
    'Tensor',
    'crop',
    'ParamAttr',
    'stanh',
    'randint',
    'assign',
    'gather',
    'scale',
    'zeros',
    'rsqrt',
    'squeeze',
    'squeeze_',
    'to_tensor',
    'gather_nd',
    'isinf',
    'uniform',
    'floor_divide',
    'remainder',
    'floor_mod',
    'roll',
    'batch',
    'max',
    'norm',
    'logical_or',
    'bitwise_and',
    'bitwise_or',
    'bitwise_xor',
    'bitwise_not',
    'mm',
    'flip',
    'histogram',
    'multiplex',
    'CUDAPlace',
    'NPUPlace',
    'empty',
    'shape',
    'real',
    'imag',
    'reciprocal',
    'rand',
    'less_equal',
    'triu',
    'sin',
    'dist',
    'unbind',
    'meshgrid',
    'arange',
    'load',
    'numel',
    'median',
    'inverse',
    'no_grad',
    'set_grad_enabled',
    'mod',
    'abs',
    'tril',
    'pow',
    'zeros_like',
    'maximum',
    'topk',
    'index_select',
    'CPUPlace',
    'matmul',
    'seed',
    'acos',
    'logical_xor',
    'exp',
    'expm1',
    'bernoulli',
    'sinh',
    'round',
    'DataParallel',
    'argmin',
    'prod',
    'broadcast_shape',
    'conj',
    'neg',
    'lgamma',
    'square',
    'divide',
    'ceil',
    'atan',
    'atan2',
    'expand',
    'broadcast_to',
    'ones_like',
    'index_sample',
    'cast',
    'grad',
    'all',
    'ones',
    'not_equal',
    'sum',
    'tile',
    'greater_equal',
    'isfinite',
    'create_parameter',
    'dot',
    'increment',
    'erf',
    'bmm',
    'chunk',
    'tolist',
    'greater_than',
    'shard_index',
    'argsort',
    'tanh',
    'tanh_',
    'transpose',
    'randn',
    'strided_slice',
    'unique',
    'unique_consecutive',
    'set_cuda_rng_state',
    'set_printoptions',
    'std',
    'flatten',
    'asin',
    'multiply',
    'disable_static',
    'masked_select',
    'var',
    'trace',
    'enable_static',
    'scatter_nd',
    'set_default_dtype',
    'disable_signal_handler',
    'expand_as',
    'stack',
    'sqrt',
    'cholesky',
    'matrix_power',
    'randperm',
    'linspace',
    'reshape',
    'reshape_',
    'reverse',
    'nonzero',
    'CUDAPinnedPlace',
    'logical_not',
    'add_n',
    'minimum',
    'scatter',
    'scatter_',
    'floor',
    'cosh',
    'log',
    'log2',
    'log10',
    'concat',
    'check_shape',
    'trunc',
    'digamma',
    'standard_normal',
    'diagonal',
    'broadcast_tensors',
    'einsum',
    'set_flags',
    'get_flags'
]