diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 7bac330376c44fb9632258b81ccb00255ab33a7c..e4cca3d459c4ce1a4c13162a18ea627b012954f4 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -21,8 +21,7 @@ except ImportError:
     import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
     )

-import paddle.batch
-batch = batch.batch
+from .batch import batch # noqa: F401
 from .fluid import monkey_patch_variable
 from .fluid.dygraph import monkey_patch_math_varbase
 monkey_patch_variable()
@@ -135,7 +134,6 @@ from .tensor.manipulation import squeeze # noqa: F401
 from .tensor.manipulation import squeeze_ # noqa: F401
 from .tensor.manipulation import stack # noqa: F401
 from .tensor.manipulation import strided_slice # noqa: F401
-from .tensor.manipulation import transpose # noqa: F401
 from .tensor.manipulation import unique # noqa: F401
 from .tensor.manipulation import unsqueeze # noqa: F401
 from .tensor.manipulation import unsqueeze_ # noqa: F401
@@ -191,7 +189,6 @@ from .tensor.math import floor_mod # noqa: F401
 from .tensor.math import multiply # noqa: F401
 from .tensor.math import add # noqa: F401
 from .tensor.math import subtract # noqa: F401
-from .tensor.math import atan # noqa: F401
 from .tensor.math import logsumexp # noqa: F401
 from .tensor.math import inverse # noqa: F401
 from .tensor.math import log1p # noqa: F401
@@ -244,9 +241,8 @@ from .framework import save # noqa: F401
 from .framework import load # noqa: F401
 from .framework import DataParallel # noqa: F401

-from .framework import set_default_dtype #DEFINE_ALIAS
-from .framework import get_default_dtype #DEFINE_ALIAS
-from .framework import set_grad_enabled #DEFINE_ALIAS
+from .framework import set_default_dtype # noqa: F401
+from .framework import get_default_dtype # noqa: F401

 from .tensor.search import index_sample # noqa: F401
 from .tensor.stat import mean # noqa: F401
@@ -281,7 +277,7 @@ import paddle.vision # noqa: F401
 from .tensor.random import check_shape # noqa: F401

 disable_static()
-__all__ = [ #noqa
+__all__ = [ # noqa
     'dtype',
     'uint8',
     'int8',
@@ -323,7 +319,6 @@ __all__ = [ #noqa
     'cos',
     'tan',
     'mean',
-    'XPUPlace',
     'mv',
     'in_dynamic_mode',
     'min',
@@ -360,7 +355,6 @@ __all__ = [ #noqa
     'to_tensor',
     'gather_nd',
     'isinf',
-    'set_device',
     'uniform',
     'floor_divide',
     'remainder',
@@ -384,8 +378,6 @@ __all__ = [ #noqa
     'rand',
     'less_equal',
     'triu',
-    'is_compiled_with_cuda',
-    'is_compiled_with_rocm',
     'sin',
     'dist',
     'unbind',
@@ -414,8 +406,6 @@ __all__ = [ #noqa
     'bernoulli',
     'summary',
     'sinh',
-    'is_compiled_with_xpu',
-    'is_compiled_with_npu',
     'round',
     'DataParallel',
     'argmin',
@@ -437,7 +427,6 @@ __all__ = [ #noqa
     'not_equal',
     'sum',
     'tile',
-    'get_device',
     'greater_equal',
     'isfinite',
     'create_parameter',
@@ -470,7 +459,6 @@ __all__ = [ #noqa
     'scatter_nd',
     'set_default_dtype',
     'expand_as',
-    'get_cudnn_version',
     'stack',
     'sqrt',
     'cholesky',
@@ -484,7 +472,6 @@ __all__ = [ #noqa
     'logical_not',
     'add_n',
     'minimum',
-    'ComplexTensor',
     'scatter',
     'scatter_',
     'floor',
@@ -493,5 +480,6 @@ __all__ = [ #noqa
     'log2',
     'log10',
     'concat',
-    'check_shape'
+    'check_shape',
+    'standard_normal'
 ]
diff --git a/python/paddle/amp/__init__.py b/python/paddle/amp/__init__.py
index 32587938512c44df82cfa7353ac45a6cc3094186..64992752b2e8d8fcc90308b3f61c6a55abd01bc5 100644
--- a/python/paddle/amp/__init__.py
+++ b/python/paddle/amp/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .auto_cast import auto_cast
-from .grad_scaler import GradScaler
+from .auto_cast import auto_cast # noqa: F401
+from .grad_scaler import GradScaler # noqa: F401

 __all__ = ['auto_cast', 'GradScaler']
diff --git a/python/paddle/amp/auto_cast.py b/python/paddle/amp/auto_cast.py
index b83f81b27d1a0745c7a2f3339bc3939eb2f19490..974f718c2d4e2319c2f74783e285a4eb9365c80e 100644
--- a/python/paddle/amp/auto_cast.py
+++ b/python/paddle/amp/auto_cast.py
@@ -14,7 +14,7 @@

 from paddle.fluid.dygraph.amp import amp_guard

-__all__ = ['auto_cast']
+__all__ = []


 def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py
index 72a67a92c495863aba62bdaa93811e59780ed846..770b660a9e11ff4fad06deec7b2f4bbbdf1964a8 100644
--- a/python/paddle/amp/grad_scaler.py
+++ b/python/paddle/amp/grad_scaler.py
@@ -14,7 +14,7 @@

 from paddle.fluid.dygraph.amp import AmpScaler

-__all__ = ['GradScaler']
+__all__ = []


 class GradScaler(AmpScaler):
diff --git a/python/paddle/autograd/__init__.py b/python/paddle/autograd/__init__.py
index 71110e95817879fa55bcfa98293139a29b79997a..569619f065a051d071eb8be6b8d8f63049b20d2f 100644
--- a/python/paddle/autograd/__init__.py
+++ b/python/paddle/autograd/__init__.py
@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ..fluid.dygraph.base import grad #DEFINE_ALIAS
-
-from . import backward_mode
-from .backward_mode import backward
-from .py_layer import PyLayer, PyLayerContext
+from ..fluid.dygraph.base import grad # noqa: F401
+from . import backward_mode # noqa: F401
+from .backward_mode import backward # noqa: F401
+from .py_layer import PyLayer, PyLayerContext # noqa: F401

 __all__ = ['grad', 'backward', 'PyLayer', 'PyLayerContext']
diff --git a/python/paddle/autograd/backward_mode.py b/python/paddle/autograd/backward_mode.py
index 96e4336abaa6fa9ca5f23a56c551b8002c347888..6efbe777d537cadbe07a3bf21d807799e1227439 100644
--- a/python/paddle/autograd/backward_mode.py
+++ b/python/paddle/autograd/backward_mode.py
@@ -15,7 +15,7 @@ from paddle.fluid import core
 from paddle.fluid import framework
 import paddle

-__all__ = ['backward']
+__all__ = []


 @framework.dygraph_only
diff --git a/python/paddle/autograd/py_layer.py b/python/paddle/autograd/py_layer.py
index 35e2cd24391775c6e9144d555e68ab12295385b6..5a22d22151a1cd12b68fc3672faec965f399d5fd 100644
--- a/python/paddle/autograd/py_layer.py
+++ b/python/paddle/autograd/py_layer.py
@@ -15,7 +15,7 @@ import paddle
 from paddle.fluid.framework import dygraph_only
 from paddle.fluid import core

-__all__ = ['PyLayer', 'PyLayerContext']
+__all__ = []


 class PyLayerContext(object):
diff --git a/python/paddle/batch.py b/python/paddle/batch.py
index f6d2d8eb288744acab6d1c2f9d2a9db9a3087f58..f787f603f7e3ae0a6aa205596add48d192f54451 100644
--- a/python/paddle/batch.py
+++ b/python/paddle/batch.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-__all__ = ['batch']
+__all__ = []


 def batch(reader, batch_size, drop_last=False):
@@ -35,11 +35,11 @@ def batch(reader, batch_size, drop_last=False):
     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
+            import paddle
             def reader():
                 for i in range(10):
                     yield i
-            batch_reader = fluid.io.batch(reader, batch_size=2)
+            batch_reader = paddle.batch(reader, batch_size=2)

             for data in batch_reader():
                 print(data)
@@ -60,7 +60,7 @@ def batch(reader, batch_size, drop_last=False):
             if len(b) == batch_size:
                 yield b
                 b = []
-        if drop_last == False and len(b) != 0:
+        if drop_last is False and len(b) != 0:
             yield b

     # Batch size check
diff --git a/python/paddle/compat.py b/python/paddle/compat.py
index 7c753815c5ccd32cfa65668b71b22e19b381b2b6..886a787623ed18cd4b7a56f7ac6661f5f7bc6800 100644
--- a/python/paddle/compat.py
+++ b/python/paddle/compat.py
@@ -15,18 +15,11 @@
 import six
 import math

-__all__ = [
-    'long_type',
-    'to_text',
-    'to_bytes',
-    'round',
-    'floor_division',
-    'get_exception_message',
-]
+__all__ = []

 if six.PY2:
     int_type = int
-    long_type = long
+    long_type = long # noqa: F821
 else:
     int_type = int
     long_type = int
diff --git a/python/paddle/device.py b/python/paddle/device.py
index 85b813a7f51b56ec953df55a88e1e8288399ce1b..93e439ecf0aa420f178d646047a1b72c17189a65 100644
--- a/python/paddle/device.py
+++ b/python/paddle/device.py
@@ -18,21 +18,16 @@ import os
 from paddle.fluid import core
 from paddle.fluid import framework
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import is_compiled_with_cuda #DEFINE_ALIAS
-from paddle.fluid.framework import is_compiled_with_rocm #DEFINE_ALIAS
+from paddle.fluid.framework import is_compiled_with_cuda # noqa: F401
+from paddle.fluid.framework import is_compiled_with_rocm # noqa: F401

-__all__ = [
+
+__all__ = [ # noqa
     'get_cudnn_version',
     'set_device',
     'get_device',
     'XPUPlace',
-    'is_compiled_with_xpu'
-    # 'cpu_places',
-    # 'CPUPlace',
-    # 'cuda_pinned_places',
-    # 'cuda_places',
-    # 'CUDAPinnedPlace',
-    # 'CUDAPlace',
+    'is_compiled_with_xpu',
     'is_compiled_with_cuda',
     'is_compiled_with_rocm',
     'is_compiled_with_npu'
@@ -68,7 +63,7 @@ def is_compiled_with_xpu():

         .. code-block:: python

             import paddle
-            support_xpu = paddle.device.is_compiled_with_xpu()
+            support_xpu = paddle.is_compiled_with_xpu()
     """
     return core.is_compiled_with_xpu()
@@ -82,9 +77,10 @@ def XPUPlace(dev_id):

     Examples:
         .. code-block:: python
-
+            # required: xpu
+
             import paddle
-            place = paddle.device.XPUPlace(0)
+            place = paddle.XPUPlace(0)
     """
     return core.XPUPlace(dev_id)
@@ -127,15 +123,13 @@ def _convert_to_place(device):
         place = core.CPUPlace()
     elif lower_device == 'gpu':
         if not core.is_compiled_with_cuda():
-            raise ValueError(
-                "The device should not be 'gpu', " \
-                "since PaddlePaddle is not compiled with CUDA")
+            raise ValueError("The device should not be 'gpu', "
+                             "since PaddlePaddle is not compiled with CUDA")
         place = core.CUDAPlace(ParallelEnv().dev_id)
     elif lower_device == 'xpu':
         if not core.is_compiled_with_xpu():
-            raise ValueError(
-                "The device should not be 'xpu', " \
-                "since PaddlePaddle is not compiled with XPU")
+            raise ValueError("The device should not be 'xpu', "
+                             "since PaddlePaddle is not compiled with XPU")
         selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
         device_id = int(selected_xpus[0])
         place = core.XPUPlace(device_id)
@@ -149,7 +143,7 @@ def _convert_to_place(device):
         if avaliable_gpu_device:
             if not core.is_compiled_with_cuda():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with CUDA".format(avaliable_gpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
@@ -158,7 +152,7 @@ def _convert_to_place(device):
         if avaliable_xpu_device:
             if not core.is_compiled_with_xpu():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with XPU".format(avaliable_xpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py
index bc042e722947a0f0293f655a5d58f7823c0d0d03..efe747408428a68772726c28af469b975836511e 100644
--- a/python/paddle/distributed/parallel.py
+++ b/python/paddle/distributed/parallel.py
@@ -29,9 +29,7 @@ from paddle.fluid.dygraph import parallel_helper
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.distributed.fleet.base.private_helper_function import wait_server_ready # noqa: F401

-__all__ = [ #noqa
-    "init_parallel_env"
-]
+__all__ = []


 ParallelStrategy = core.ParallelStrategy
@@ -152,7 +150,6 @@ def init_parallel_env():
     init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
     if init_gloo:
         ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
-        ep_rank = parallel_env.trainer_endpoints[parallel_env.rank].split(":")
         manager = Manager()
         # glboal dict to store status
         http_server_d = manager.dict()
diff --git a/python/paddle/incubate/__init__.py b/python/paddle/incubate/__init__.py
index 03e5a88624086b8781a1d8bee4437d9a17c98f76..22769053b1ac97508027910db28d91c526adead3 100644
--- a/python/paddle/incubate/__init__.py
+++ b/python/paddle/incubate/__init__.py
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from . import optimizer
-from . import checkpoint
-from ..fluid.layer_helper import LayerHelper
+from .optimizer import LookAhead # noqa: F401
+from .optimizer import ModelAverage # noqa: F401
+from .checkpoint import auto_checkpoint # noqa: F401
+from ..fluid.layer_helper import LayerHelper # noqa: F401

-__all__ = []
-__all__ += optimizer.__all__
-__all__ += checkpoint.__all__
+__all__ = [ # noqa
+    'LookAhead', 'ModelAverage'
+]
diff --git a/python/paddle/incubate/checkpoint/__init__.py b/python/paddle/incubate/checkpoint/__init__.py
index 7ddd256df747981019e3afb0bb1dd839cf3ea550..79e6259de0275410664b9bfb2c34c33e21c5d529 100644
--- a/python/paddle/incubate/checkpoint/__init__.py
+++ b/python/paddle/incubate/checkpoint/__init__.py
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ...fluid.incubate.checkpoint import auto_checkpoint
+from ...fluid.incubate.checkpoint import auto_checkpoint # noqa: F401

-__all__ = ["auto_checkpoint"]
+__all__ = []
diff --git a/python/paddle/incubate/optimizer/__init__.py b/python/paddle/incubate/optimizer/__init__.py
index 4a3889d0ee1a905a534f33909b4241f5c91be2f5..d966d187f288ac0865109cf361dd310328792aaf 100644
--- a/python/paddle/incubate/optimizer/__init__.py
+++ b/python/paddle/incubate/optimizer/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .lookahead import LookAhead
-from .modelaverage import ModelAverage
+from .lookahead import LookAhead # noqa: F401
+from .modelaverage import ModelAverage # noqa: F401

-__all__ = ['LookAhead', 'ModelAverage']
+__all__ = []
diff --git a/python/paddle/incubate/optimizer/lookahead.py b/python/paddle/incubate/optimizer/lookahead.py
index f90d520a5dfe8adc524ef20b7489ea008fb9c51a..720a84a24f0aa65c833939844c53e871b4e0680b 100644
--- a/python/paddle/incubate/optimizer/lookahead.py
+++ b/python/paddle/incubate/optimizer/lookahead.py
@@ -20,7 +20,7 @@ import paddle
 import numpy as np
 from paddle.fluid.dygraph import base as imperative_base

-__all__ = ["LookAhead"]
+__all__ = []


 class LookAhead(Optimizer):
@@ -99,7 +99,7 @@ class LookAhead(Optimizer):
             layer = LinearNet()
             loss_fn = nn.CrossEntropyLoss()
             optimizer = paddle.optimizer.SGD(learning_rate=0.1, parameters=layer.parameters())
-            lookahead = paddle.incubate.optimizer.LookAhead(optimizer, alpha=0.2, k=5)
+            lookahead = paddle.incubate.LookAhead(optimizer, alpha=0.2, k=5)

             # create data loader
             dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
@@ -163,7 +163,7 @@ class LookAhead(Optimizer):
             out = linear(inp)
             loss = paddle.mean(out)
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            lookahead = paddle.incubate.optimizer.LookAhead(sgd, alpha=0.2, k=5)
+            lookahead = paddle.incubate.LookAhead(sgd, alpha=0.2, k=5)
             loss.backward()
             lookahead.step()
             lookahead.clear_grad()
@@ -274,7 +274,7 @@ class LookAhead(Optimizer):
             out = linear(inp)
             loss = paddle.mean(out)
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            lookahead = paddle.incubate.optimizer.LookAhead(sgd, alpha=0.2, k=5)
+            lookahead = paddle.incubate.LookAhead(sgd, alpha=0.2, k=5)
             loss.backward()
             lookahead.minimize(loss)
             lookahead.clear_grad()
@@ -282,9 +282,6 @@ class LookAhead(Optimizer):
         """
         assert isinstance(loss, Variable), "The loss should be an Tensor."
-        parameter_list = parameters if parameters \
-            else self._parameter_list
-
         # Apply inner optimizer to the main_program
         optimize_ops, params_grads = self.inner_optimizer.minimize(
             loss,
diff --git a/python/paddle/incubate/optimizer/modelaverage.py b/python/paddle/incubate/optimizer/modelaverage.py
index 8afcaf9207e7cc84d143356b4f5efb74a175f2bd..8ffc3bdac62d040ccd45fe9768fdf566e784dcc4 100644
--- a/python/paddle/incubate/optimizer/modelaverage.py
+++ b/python/paddle/incubate/optimizer/modelaverage.py
@@ -21,7 +21,7 @@ import numpy as np
 from paddle.fluid.dygraph import base as imperative_base
 from paddle.fluid.wrapped_decorator import signature_safe_contextmanager

-__all__ = ["ModelAverage"]
+__all__ = []


 class ModelAverage(Optimizer):
@@ -129,7 +129,7 @@ class ModelAverage(Optimizer):
             layer = LinearNet()
             loss_fn = nn.CrossEntropyLoss()
             optimizer = opt.Momentum(learning_rate=0.2, momentum=0.1, parameters=layer.parameters())
-            model_average = paddle.incubate.optimizer.ModelAverage(0.15,
+            model_average = paddle.incubate.ModelAverage(0.15,
                                         parameters=layer.parameters(),
                                         min_average_window=2,
                                         max_average_window=10)
@@ -313,7 +313,7 @@ class ModelAverage(Optimizer):
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
             sgd.minimize(loss)

-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                   parameters=linear.parameters(),
                                                   min_average_window=2,
                                                   max_average_window=4)
@@ -345,7 +345,7 @@ class ModelAverage(Optimizer):
             out = linear(inp)
             loss = paddle.mean(out)
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                   parameters=linear.parameters(),
                                                   min_average_window=2,
                                                   max_average_window=4)
@@ -395,7 +395,7 @@ class ModelAverage(Optimizer):
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())


-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                   parameters=linear.parameters(),
                                                   min_average_window=2,
                                                   max_average_window=4)
@@ -415,7 +415,6 @@ class ModelAverage(Optimizer):
                                                 param)
         old_num_accumulates = self._get_accumulator(
             'old_num_accumulates', param)
-        num_updates = self._get_accumulator('num_updates', param)
         sum_1 = self._get_accumulator('sum_1', param)
         sum_2 = self._get_accumulator('sum_2', param)
         sum_3 = self._get_accumulator('sum_3', param)
@@ -467,7 +466,7 @@ class ModelAverage(Optimizer):
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())


-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                   parameters=linear.parameters(),
                                                   min_average_window=2,
                                                   max_average_window=4)
@@ -506,17 +505,15 @@ class ModelAverage(Optimizer):
             self._get_accumulator('num_accumulates', param))
         old_num_accumulates = block._clone_variable(
             self._get_accumulator('old_num_accumulates', param))
-        num_updates = block._clone_variable(
-            self._get_accumulator('num_updates', param))

         # backup param value to grad
         layers.assign(input=param, output=grad)

         # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
         tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
         sum = layers.sum(x=[sum_1, sum_2, sum_3])
         tmp = layers.cast(
-            x=tmp, dtype='float32' if self._dtype == None else self._dtype)
+            x=tmp, dtype='float32' if self._dtype is None else self._dtype)
         sum = layers.cast(
-            x=sum, dtype='float32' if self._dtype == None else self._dtype)
+            x=sum, dtype='float32' if self._dtype is None else self._dtype)
         layers.ops._elementwise_div(x=sum, y=tmp, out=param)

     def _add_average_restore_op(self, block, param):
diff --git a/python/paddle/inference/__init__.py b/python/paddle/inference/__init__.py
index c388301ec3408e436eacb2567e8e529d0bbc03bb..4e172039716628a157c8324c17ff2d4be3666349 100644
--- a/python/paddle/inference/__init__.py
+++ b/python/paddle/inference/__init__.py
@@ -12,5 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ..fluid.inference import Config, DataType, PlaceType, PrecisionType, Tensor, \
-    Predictor, create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool
+from ..fluid.inference import Config # noqa: F401
+from ..fluid.inference import DataType # noqa: F401
+from ..fluid.inference import PlaceType # noqa: F401
+from ..fluid.inference import PrecisionType # noqa: F401
+from ..fluid.inference import Tensor # noqa: F401
+from ..fluid.inference import Predictor # noqa: F401
+from ..fluid.inference import create_predictor # noqa: F401
+from ..fluid.inference import get_version # noqa: F401
+from ..fluid.inference import get_num_bytes_of_data_type # noqa: F401
+from ..fluid.inference import PredictorPool # noqa: F401
+
+__all__ = [ # noqa
+    'Config',
+    'DataType',
+    'PlaceType',
+    'PrecisionType',
+    'Tensor',
+    'Predictor',
+    'create_predictor',
+    'get_version',
+    'get_num_bytes_of_data_type',
+    'PredictorPool'
+]
diff --git a/python/paddle/jit/__init__.py b/python/paddle/jit/__init__.py
index 650837b2d7702c70131250b9da94abd62b369e7a..576989e8e0d2aa019dc9ec7c7d69afa941f1dcb7 100644
--- a/python/paddle/jit/__init__.py
+++ b/python/paddle/jit/__init__.py
@@ -14,19 +14,26 @@

 from __future__ import print_function

-from ..fluid.dygraph.jit import save #DEFINE_ALIAS
-from ..fluid.dygraph.jit import load #DEFINE_ALIAS
-from ..fluid.dygraph.jit import TracedLayer #DEFINE_ALIAS
-from ..fluid.dygraph.jit import set_code_level #DEFINE_ALIAS
-from ..fluid.dygraph.jit import set_verbosity #DEFINE_ALIAS
-from ..fluid.dygraph.jit import declarative as to_static #DEFINE_ALIAS
-from ..fluid.dygraph.jit import not_to_static #DEFINE_ALIAS
-from ..fluid.dygraph import ProgramTranslator #DEFINE_ALIAS
-from ..fluid.dygraph.io import TranslatedLayer #DEFINE_ALIAS
+from ..fluid.dygraph.jit import save # noqa: F401
+from ..fluid.dygraph.jit import load # noqa: F401
+from ..fluid.dygraph.jit import TracedLayer # noqa: F401
+from ..fluid.dygraph.jit import set_code_level # noqa: F401
+from ..fluid.dygraph.jit import set_verbosity # noqa: F401
+from ..fluid.dygraph.jit import declarative as to_static # noqa: F401
+from ..fluid.dygraph.jit import not_to_static # noqa: F401
+from ..fluid.dygraph import ProgramTranslator # noqa: F401
+from ..fluid.dygraph.io import TranslatedLayer # noqa: F401

-from . import dy2static
+from . import dy2static # noqa: F401

-__all__ = [
-    'save', 'load', 'TracedLayer', 'to_static', 'ProgramTranslator',
-    'TranslatedLayer', 'set_code_level', 'set_verbosity', 'not_to_static'
+__all__ = [ # noqa
+    'save',
+    'load',
+    'TracedLayer',
+    'to_static',
+    'ProgramTranslator',
+    'TranslatedLayer',
+    'set_code_level',
+    'set_verbosity',
+    'not_to_static'
 ]
diff --git a/python/paddle/jit/dy2static/__init__.py b/python/paddle/jit/dy2static/__init__.py
index 239b554180b1bd74517b152dfdf079082600b806..030d5499c2ca96d997dfe571b81c039bb0eb2c99 100644
--- a/python/paddle/jit/dy2static/__init__.py
+++ b/python/paddle/jit/dy2static/__init__.py
@@ -12,18 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from __future__ import print_function
-
-from . import convert_operators
-from .convert_operators import *
-
-from . import convert_call_func
-from .convert_call_func import *
-
-from . import variable_trans_func
-from .variable_trans_func import *
+from .convert_call_func import convert_call # noqa: F401
+from .convert_operators import cast_bool_if_necessary # noqa: F401
+from .convert_operators import convert_assert # noqa: F401
+from .convert_operators import convert_ifelse # noqa: F401
+from .convert_operators import convert_len # noqa: F401
+from .convert_operators import convert_logical_and # noqa: F401
+from .convert_operators import convert_logical_not # noqa: F401
+from .convert_operators import convert_logical_or # noqa: F401
+from .convert_operators import convert_pop # noqa: F401
+from .convert_operators import convert_print # noqa: F401
+from .convert_operators import convert_shape_compare # noqa: F401
+from .convert_operators import convert_var_dtype # noqa: F401
+from .convert_operators import convert_var_shape # noqa: F401
+from .convert_operators import convert_var_shape_simple # noqa: F401
+from .convert_operators import eval_if_exist_else_none # noqa: F401
+from .convert_operators import choose_shape_attr_or_api # noqa: F401
+from .convert_operators import convert_while_loop # noqa: F401
+from .variable_trans_func import create_bool_as_type # noqa: F401
+from .variable_trans_func import create_fill_constant_node # noqa: F401
+from .variable_trans_func import create_static_variable_gast_node # noqa: F401
+from .variable_trans_func import data_layer_not_check # noqa: F401
+from .variable_trans_func import to_static_variable # noqa: F401
+from .variable_trans_func import to_static_variable_gast_node # noqa: F401

 __all__ = []
-__all__ += convert_operators.__all__
-__all__ += convert_call_func.__all__
-__all__ += variable_trans_func.__all__
diff --git a/python/paddle/jit/dy2static/convert_call_func.py b/python/paddle/jit/dy2static/convert_call_func.py
index be2377608e36c75d95cb2c1c609e99cef7d438a7..4f6197a3cba6ae811998def0d59a221d2265ce0c 100644
--- a/python/paddle/jit/dy2static/convert_call_func.py
+++ b/python/paddle/jit/dy2static/convert_call_func.py
@@ -13,6 +13,6 @@
 # limitations under the License.
 from __future__ import print_function

-from ...fluid.dygraph.dygraph_to_static.convert_call_func import convert_call #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.convert_call_func import convert_call # noqa: F401

-__all__ = ['convert_call']
+__all__ = []
diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py
index 9321cf4a0b8324cf5e312b36a17b8ab1edc72809..8d67e06d9b27a56e9aa0fc7bc57844290d1c83e1 100644
--- a/python/paddle/jit/dy2static/convert_operators.py
+++ b/python/paddle/jit/dy2static/convert_operators.py
@@ -13,27 +13,21 @@
 # limitations under the License.
 from __future__ import print_function

-from ...fluid.dygraph.dygraph_to_static.convert_operators import cast_bool_if_necessary #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_assert #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_ifelse #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_len #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_and #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_not #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_or #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_pop #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_print #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape_compare #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_dtype #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape_simple #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import eval_if_exist_else_none #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import choose_shape_attr_or_api #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_while_loop #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.convert_operators import cast_bool_if_necessary # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_assert # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_ifelse # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_len # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_and # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_not # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_or # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_pop # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_print # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape_compare # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_dtype # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape_simple # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import eval_if_exist_else_none # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import choose_shape_attr_or_api # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_while_loop # noqa: F401

-__all__ = [
-    'cast_bool_if_necessary', 'convert_assert', 'convert_ifelse', 'convert_len',
-    'convert_logical_and', 'convert_logical_not', 'convert_logical_or',
-    'convert_pop', 'convert_print', 'convert_shape_compare',
-    'convert_var_dtype', 'convert_var_shape', 'convert_var_shape_simple',
-    'eval_if_exist_else_none', 'choose_shape_attr_or_api', 'convert_while_loop'
-]
+__all__ = []
diff --git a/python/paddle/jit/dy2static/variable_trans_func.py b/python/paddle/jit/dy2static/variable_trans_func.py
index 2deb1bbb0eef2542d8f8890a7fa476f370ba5e5a..9ce2bc2da381655e65225397831faa228c613ca6 100644
--- a/python/paddle/jit/dy2static/variable_trans_func.py
+++ b/python/paddle/jit/dy2static/variable_trans_func.py
@@ -14,15 +14,11 @@

 from __future__ import print_function

-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_as_type #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import data_layer_not_check #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_as_type # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import data_layer_not_check # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node # noqa: F401

-__all__ = [
-    'create_bool_as_type', 'create_fill_constant_node',
-    'create_static_variable_gast_node', 'data_layer_not_check',
-    'to_static_variable', 'to_static_variable_gast_node'
-]
+__all__ = []
diff --git a/python/paddle/metric/__init__.py b/python/paddle/metric/__init__.py
index e41f6d76dd22159ab189654c6d30818c600b8286..2f2ef4c6f54269067406763a02e8f0772e86bc82 100644
--- a/python/paddle/metric/__init__.py
+++ b/python/paddle/metric/__init__.py
@@ -12,7 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .metrics import *
-from . import metrics
+from .metrics import Metric # noqa: F401
+from .metrics import Accuracy # noqa: F401
+from .metrics import Precision # noqa: F401
+from .metrics import Recall # noqa: F401
+from .metrics import Auc # noqa: F401
+from .metrics import accuracy # noqa: F401

-__all__ = metrics.__all__
+__all__ = [ #noqa
+    'Metric',
+    'Accuracy',
+    'Precision',
+    'Recall',
+    'Auc',
+    'accuracy'
+]
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index d8e400b08bd474a1a59399e413a59e9a7d77f8b7..40758fb8dc3e0f034e4d5ea9ccf6e8d2897287e1 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -26,7 +26,7 @@ from ..fluid.layers.nn import topk
 from ..fluid.framework import core, _varbase_creator, in_dygraph_mode
 import paddle

-__all__ = ['Metric', 'Accuracy', 'Precision', 'Recall', 'Auc', 'accuracy']
+__all__ = []


 def _is_numpy_(var):
diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index 7cf3f94872de17c12910ae5453f74a18c4a1502d..3ccb9e957f4e4d6426645d7188d67159c0454a01 100644
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -286,5 +286,6 @@ __all__ = [ #noqa
     'Swish',
     'PixelShuffle',
     'ELU',
-    'ReLU6'
+    'ReLU6',
+    'LayerDict'
 ]
diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py
index d4c17a27a61780b431916b2634585de035778ce8..ff18afa9d20282a6e1147cbca0d17580456c0c95 100644
--- a/python/paddle/nn/functional/__init__.py
+++ b/python/paddle/nn/functional/__init__.py
@@ -194,5 +194,6 @@ __all__ = [ #noqa
     'embedding',
     'gather_tree',
     'one_hot',
-    'normalize'
+    'normalize',
+    'temporal_shift'
 ]
diff --git a/python/paddle/onnx/__init__.py b/python/paddle/onnx/__init__.py
index 885d1968ce1ae1ef4f6a4ff79f8ac40acb971baa..8853e78bf3d808108d540496f1d7d9e1d09121c4 100644
--- a/python/paddle/onnx/__init__.py
+++ b/python/paddle/onnx/__init__.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import print_function
-from .export import export
+from .export import export # noqa: F401

 __all__ = ['export']
diff --git a/python/paddle/onnx/export.py b/python/paddle/onnx/export.py
index 4b99b42bb0423c676e8d08b1931c6488b8ab1e98..b8a217a5134fb8007f7563349c3efd40e132b0b2 100644
--- a/python/paddle/onnx/export.py
+++ b/python/paddle/onnx/export.py
@@ -15,7 +15,7 @@
 import os
 from paddle.utils import try_import

-__all__ = ['export']
+__all__ = []


 def export(layer, path, input_spec=None, opset_version=9, **configs):
diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py
index 688bff4a678f2af224a5c541eb2331569edb717e..93394f9b5afdef5cb8a4d4f355c36e59d974fd5a 100644
--- a/python/paddle/static/__init__.py
+++ b/python/paddle/static/__init__.py
@@ -85,11 +85,21 @@ __all__ = [ #noqa
     'load',
     'save_inference_model',
     'load_inference_model',
+    'serialize_program',
+    'serialize_persistables',
+    'save_to_file',
+    'deserialize_program',
+    'deserialize_persistables',
+    'load_from_file',
     'normalize_program',
     'load_program_state',
     'set_program_state',
     'cpu_places',
     'cuda_places',
     'Variable',
-    'create_global_var'
+    'create_global_var',
+    'accuracy',
+    'auc',
+    'device_guard',
+    'create_parameter'
 ]
diff --git a/python/paddle/static/nn/__init__.py b/python/paddle/static/nn/__init__.py
index 416f6e4f3df06886dbd15c7a427b3620a1957842..b589d9f87895b73ba319499969c744f21f49c657 100644
--- a/python/paddle/static/nn/__init__.py
+++ b/python/paddle/static/nn/__init__.py
@@ -68,7 +68,6 @@ __all__ = [ #noqa
     'conv2d_transpose',
     'conv3d',
     'conv3d_transpose',
-    'create_parameter',
     'crf_decoding',
     'data_norm',
     'deform_conv2d',
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index c8d80fc9bc68cbbff4e270bbab4d8203e663bb2e..5aeae126d83765c56d597357edae38221389d134 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -25,7 +25,6 @@ from .creation import ones_like # noqa: F401
 from .creation import zeros # noqa: F401
 from .creation import zeros_like # noqa: F401
 from .creation import arange # noqa: F401
-from .creation import eye # noqa: F401
 from .creation import full # noqa: F401
 from .creation import full_like # noqa: F401
 from .creation import triu # noqa: F401
@@ -82,7 +81,6 @@ from .manipulation import squeeze # noqa: F401
 from .manipulation import squeeze_ # noqa: F401
 from .manipulation import stack # noqa: F401
 from .manipulation import strided_slice # noqa: F401
-from .manipulation import transpose # noqa: F401
 from .manipulation import unique # noqa: F401
 from .manipulation import unsqueeze # noqa: F401
 from .manipulation import unsqueeze_ # noqa: F401
@@ -143,7 +141,6 @@ from .math import add # noqa: F401
 from .math import add_ # noqa: F401
 from .math import subtract # noqa: F401
 from .math import subtract_ # noqa: F401
-from .math import atan # noqa: F401
 from .math import logsumexp # noqa: F401
 from .math import inverse # noqa: F401
 from .math import log2 # noqa: F401
@@ -227,7 +224,6 @@ tensor_method_func = [ #noqa
     'log2',
     'log10',
     'logsumexp',
-    'mul',
     'multiplex',
     'pow',
     'prod',