From e48f7a5b4601b80cb7962c8675d66e61c79cde04 Mon Sep 17 00:00:00 2001
From: zhiboniu <31800336+zhiboniu@users.noreply.github.com>
Date: Fri, 11 Jun 2021 15:32:59 +0800
Subject: [PATCH] update 2.0 public api in all left files (#33314)

* update 2.0 public api in all left files

* reverse device.py all list; fix some flake8 errors
---
 python/paddle/__init__.py                     | 24 +++--------
 python/paddle/amp/__init__.py                 |  4 +-
 python/paddle/amp/auto_cast.py                |  2 +-
 python/paddle/amp/grad_scaler.py              |  2 +-
 python/paddle/autograd/__init__.py            |  9 ++---
 python/paddle/autograd/backward_mode.py       |  2 +-
 python/paddle/autograd/py_layer.py            |  2 +-
 python/paddle/batch.py                        |  8 ++--
 python/paddle/compat.py                       | 11 +----
 python/paddle/device.py                       | 36 +++++++----------
 python/paddle/distributed/parallel.py         |  5 +--
 python/paddle/incubate/__init__.py            | 13 +++---
 python/paddle/incubate/checkpoint/__init__.py |  4 +-
 python/paddle/incubate/optimizer/__init__.py  |  6 +--
 python/paddle/incubate/optimizer/lookahead.py | 11 ++---
 .../paddle/incubate/optimizer/modelaverage.py | 19 ++++-----
 python/paddle/inference/__init__.py           | 25 +++++++++++-
 python/paddle/jit/__init__.py                 | 33 +++++++++------
 python/paddle/jit/dy2static/__init__.py       | 36 +++++++++++------
 .../paddle/jit/dy2static/convert_call_func.py |  4 +-
 .../paddle/jit/dy2static/convert_operators.py | 40 ++++++++-----------
 .../jit/dy2static/variable_trans_func.py      | 18 ++++-----
 python/paddle/metric/__init__.py              | 17 ++++++--
 python/paddle/metric/metrics.py               |  2 +-
 python/paddle/nn/__init__.py                  |  3 +-
 python/paddle/nn/functional/__init__.py       |  3 +-
 python/paddle/onnx/__init__.py                |  3 +-
 python/paddle/onnx/export.py                  |  2 +-
 python/paddle/static/__init__.py              | 12 +++++-
 python/paddle/static/nn/__init__.py           |  1 -
 python/paddle/tensor/__init__.py              |  4 --
 31 files changed, 186 insertions(+), 175 deletions(-)

diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 7bac330376..e4cca3d459 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -21,8 +21,7 @@ except ImportError:
     import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
     )

-import paddle.batch
-batch = batch.batch
+from .batch import batch # noqa: F401
 from .fluid import monkey_patch_variable
 from .fluid.dygraph import monkey_patch_math_varbase
 monkey_patch_variable()
@@ -135,7 +134,6 @@ from .tensor.manipulation import squeeze # noqa: F401
 from .tensor.manipulation import squeeze_ # noqa: F401
 from .tensor.manipulation import stack # noqa: F401
 from .tensor.manipulation import strided_slice # noqa: F401
-from .tensor.manipulation import transpose # noqa: F401
 from .tensor.manipulation import unique # noqa: F401
 from .tensor.manipulation import unsqueeze # noqa: F401
 from .tensor.manipulation import unsqueeze_ # noqa: F401
@@ -191,7 +189,6 @@ from .tensor.math import floor_mod # noqa: F401
 from .tensor.math import multiply # noqa: F401
 from .tensor.math import add # noqa: F401
 from .tensor.math import subtract # noqa: F401
-from .tensor.math import atan # noqa: F401
 from .tensor.math import logsumexp # noqa: F401
 from .tensor.math import inverse # noqa: F401
 from .tensor.math import log1p # noqa: F401
@@ -244,9 +241,8 @@ from .framework import save # noqa: F401
 from .framework import load # noqa: F401
 from .framework import DataParallel # noqa: F401

-from .framework import set_default_dtype #DEFINE_ALIAS
-from .framework import get_default_dtype #DEFINE_ALIAS
-from .framework import set_grad_enabled #DEFINE_ALIAS
+from .framework import set_default_dtype # noqa: F401
+from .framework import get_default_dtype # noqa: F401

 from .tensor.search import index_sample # noqa: F401
 from .tensor.stat import mean # noqa: F401
@@ -281,7 +277,7 @@ import paddle.vision # noqa: F401
 from .tensor.random import check_shape # noqa: F401
 disable_static()

-__all__ = [ #noqa
+__all__ = [ # noqa
     'dtype',
     'uint8',
     'int8',
@@ -323,7 +319,6 @@ __all__ = [ #noqa
     'cos',
     'tan',
     'mean',
-    'XPUPlace',
     'mv',
     'in_dynamic_mode',
     'min',
@@ -360,7 +355,6 @@ __all__ = [ #noqa
     'to_tensor',
     'gather_nd',
     'isinf',
-    'set_device',
     'uniform',
     'floor_divide',
     'remainder',
@@ -384,8 +378,6 @@ __all__ = [ #noqa
     'rand',
     'less_equal',
     'triu',
-    'is_compiled_with_cuda',
-    'is_compiled_with_rocm',
     'sin',
     'dist',
     'unbind',
@@ -414,8 +406,6 @@ __all__ = [ #noqa
     'bernoulli',
     'summary',
     'sinh',
-    'is_compiled_with_xpu',
-    'is_compiled_with_npu',
     'round',
     'DataParallel',
     'argmin',
@@ -437,7 +427,6 @@ __all__ = [ #noqa
     'not_equal',
     'sum',
     'tile',
-    'get_device',
     'greater_equal',
     'isfinite',
     'create_parameter',
@@ -470,7 +459,6 @@ __all__ = [ #noqa
     'scatter_nd',
     'set_default_dtype',
     'expand_as',
-    'get_cudnn_version',
     'stack',
     'sqrt',
     'cholesky',
@@ -484,7 +472,6 @@ __all__ = [ #noqa
     'logical_not',
     'add_n',
     'minimum',
-    'ComplexTensor',
     'scatter',
     'scatter_',
     'floor',
@@ -493,5 +480,6 @@ __all__ = [ #noqa
     'log2',
     'log10',
     'concat',
-    'check_shape'
+    'check_shape',
+    'standard_normal'
 ]
diff --git a/python/paddle/amp/__init__.py b/python/paddle/amp/__init__.py
index 3258793851..64992752b2 100644
--- a/python/paddle/amp/__init__.py
+++ b/python/paddle/amp/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .auto_cast import auto_cast
-from .grad_scaler import GradScaler
+from .auto_cast import auto_cast # noqa: F401
+from .grad_scaler import GradScaler # noqa: F401

 __all__ = ['auto_cast', 'GradScaler']
diff --git a/python/paddle/amp/auto_cast.py b/python/paddle/amp/auto_cast.py
index b83f81b27d..974f718c2d 100644
--- a/python/paddle/amp/auto_cast.py
+++ b/python/paddle/amp/auto_cast.py
@@ -14,7 +14,7 @@

 from paddle.fluid.dygraph.amp import amp_guard

-__all__ = ['auto_cast']
+__all__ = []


 def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py
index 72a67a92c4..770b660a9e 100644
--- a/python/paddle/amp/grad_scaler.py
+++ b/python/paddle/amp/grad_scaler.py
@@ -14,7 +14,7 @@

 from paddle.fluid.dygraph.amp import AmpScaler

-__all__ = ['GradScaler']
+__all__ = []


 class GradScaler(AmpScaler):
diff --git a/python/paddle/autograd/__init__.py b/python/paddle/autograd/__init__.py
index 71110e9581..569619f065 100644
--- a/python/paddle/autograd/__init__.py
+++ b/python/paddle/autograd/__init__.py
@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ..fluid.dygraph.base import grad #DEFINE_ALIAS
-
-from . import backward_mode
-from .backward_mode import backward
-from .py_layer import PyLayer, PyLayerContext
+from ..fluid.dygraph.base import grad # noqa: F401
+from . import backward_mode # noqa: F401
+from .backward_mode import backward # noqa: F401
+from .py_layer import PyLayer, PyLayerContext # noqa: F401

 __all__ = ['grad', 'backward', 'PyLayer', 'PyLayerContext']
diff --git a/python/paddle/autograd/backward_mode.py b/python/paddle/autograd/backward_mode.py
index 96e4336aba..6efbe777d5 100644
--- a/python/paddle/autograd/backward_mode.py
+++ b/python/paddle/autograd/backward_mode.py
@@ -15,7 +15,7 @@ from paddle.fluid import core
 from paddle.fluid import framework
 import paddle

-__all__ = ['backward']
+__all__ = []


 @framework.dygraph_only
diff --git a/python/paddle/autograd/py_layer.py b/python/paddle/autograd/py_layer.py
index 35e2cd2439..5a22d22151 100644
--- a/python/paddle/autograd/py_layer.py
+++ b/python/paddle/autograd/py_layer.py
@@ -15,7 +15,7 @@ import paddle
 from paddle.fluid.framework import dygraph_only
 from paddle.fluid import core

-__all__ = ['PyLayer', 'PyLayerContext']
+__all__ = []


 class PyLayerContext(object):
diff --git a/python/paddle/batch.py b/python/paddle/batch.py
index f6d2d8eb28..f787f603f7 100644
--- a/python/paddle/batch.py
+++ b/python/paddle/batch.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-__all__ = ['batch']
+__all__ = []


 def batch(reader, batch_size, drop_last=False):
@@ -35,11 +35,11 @@ def batch(reader, batch_size, drop_last=False):
     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
+            import paddle
             def reader():
                 for i in range(10):
                     yield i
-            batch_reader = fluid.io.batch(reader, batch_size=2)
+            batch_reader = paddle.batch(reader, batch_size=2)
             for data in batch_reader():
                 print(data)

@@ -60,7 +60,7 @@ def batch(reader, batch_size, drop_last=False):
             if len(b) == batch_size:
                 yield b
                 b = []
-        if drop_last == False and len(b) != 0:
+        if drop_last is False and len(b) != 0:
             yield b

     # Batch size check
diff --git a/python/paddle/compat.py b/python/paddle/compat.py
index 7c753815c5..886a787623 100644
--- a/python/paddle/compat.py
+++ b/python/paddle/compat.py
@@ -15,18 +15,11 @@
 import six
 import math

-__all__ = [
-    'long_type',
-    'to_text',
-    'to_bytes',
-    'round',
-    'floor_division',
-    'get_exception_message',
-]
+__all__ = []

 if six.PY2:
     int_type = int
-    long_type = long
+    long_type = long # noqa: F821
 else:
     int_type = int
     long_type = int
diff --git a/python/paddle/device.py b/python/paddle/device.py
index 85b813a7f5..93e439ecf0 100644
--- a/python/paddle/device.py
+++ b/python/paddle/device.py
@@ -18,21 +18,16 @@ import os
 from paddle.fluid import core
 from paddle.fluid import framework
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import is_compiled_with_cuda #DEFINE_ALIAS
-from paddle.fluid.framework import is_compiled_with_rocm #DEFINE_ALIAS
+from paddle.fluid.framework import is_compiled_with_cuda # noqa: F401
+from paddle.fluid.framework import is_compiled_with_rocm # noqa: F401

-__all__ = [
+
+__all__ = [ # npqa
     'get_cudnn_version',
     'set_device',
     'get_device',
     'XPUPlace',
-    'is_compiled_with_xpu'
-    # 'cpu_places',
-    # 'CPUPlace',
-    # 'cuda_pinned_places',
-    # 'cuda_places',
-    # 'CUDAPinnedPlace',
-    # 'CUDAPlace',
+    'is_compiled_with_xpu',
     'is_compiled_with_cuda',
     'is_compiled_with_rocm',
     'is_compiled_with_npu'
@@ -68,7 +63,7 @@ def is_compiled_with_xpu():
         .. code-block:: python

             import paddle
-            support_xpu = paddle.device.is_compiled_with_xpu()
+            support_xpu = paddle.is_compiled_with_xpu()
     """
     return core.is_compiled_with_xpu()

@@ -82,9 +77,10 @@ def XPUPlace(dev_id):

     Examples:
         .. code-block:: python
-
+            # required: xpu
+
             import paddle
-            place = paddle.device.XPUPlace(0)
+            place = paddle.XPUPlace(0)
     """
     return core.XPUPlace(dev_id)

@@ -127,15 +123,13 @@ def _convert_to_place(device):
         place = core.CPUPlace()
     elif lower_device == 'gpu':
         if not core.is_compiled_with_cuda():
-            raise ValueError(
-                "The device should not be 'gpu', " \
-                "since PaddlePaddle is not compiled with CUDA")
+            raise ValueError("The device should not be 'gpu', "
+                             "since PaddlePaddle is not compiled with CUDA")
         place = core.CUDAPlace(ParallelEnv().dev_id)
     elif lower_device == 'xpu':
         if not core.is_compiled_with_xpu():
-            raise ValueError(
-                "The device should not be 'xpu', " \
-                "since PaddlePaddle is not compiled with XPU")
+            raise ValueError("The device should not be 'xpu', "
+                             "since PaddlePaddle is not compiled with XPU")
         selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
         device_id = int(selected_xpus[0])
         place = core.XPUPlace(device_id)
@@ -149,7 +143,7 @@ def _convert_to_place(device):
         if avaliable_gpu_device:
             if not core.is_compiled_with_cuda():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with CUDA".format(avaliable_gpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
@@ -158,7 +152,7 @@ def _convert_to_place(device):
         if avaliable_xpu_device:
             if not core.is_compiled_with_xpu():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with XPU".format(avaliable_xpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py
index bc042e7229..efe7474084 100644
--- a/python/paddle/distributed/parallel.py
+++ b/python/paddle/distributed/parallel.py
@@ -29,9 +29,7 @@ from paddle.fluid.dygraph import parallel_helper
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.distributed.fleet.base.private_helper_function import wait_server_ready # noqa: F401

-__all__ = [ #noqa
-    "init_parallel_env"
-]
+__all__ = []

 ParallelStrategy = core.ParallelStrategy
@@ -152,7 +150,6 @@ def init_parallel_env():
     init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
     if init_gloo:
         ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
-        ep_rank = parallel_env.trainer_endpoints[parallel_env.rank].split(":")
         manager = Manager()
         # glboal dict to store status
         http_server_d = manager.dict()
diff --git a/python/paddle/incubate/__init__.py b/python/paddle/incubate/__init__.py
index 03e5a88624..22769053b1 100644
--- a/python/paddle/incubate/__init__.py
+++ b/python/paddle/incubate/__init__.py
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from . import optimizer
-from . import checkpoint
-from ..fluid.layer_helper import LayerHelper
+from .optimizer import LookAhead # noqa: F401
+from .optimizer import ModelAverage # noqa: F401
+from .checkpoint import auto_checkpoint # noqa: F401
+from ..fluid.layer_helper import LayerHelper # noqa: F401

-__all__ = []
-__all__ += optimizer.__all__
-__all__ += checkpoint.__all__
+__all__ = [ # noqa
+    'LookAhead', 'ModelAverage'
+]
diff --git a/python/paddle/incubate/checkpoint/__init__.py b/python/paddle/incubate/checkpoint/__init__.py
index 7ddd256df7..79e6259de0 100644
--- a/python/paddle/incubate/checkpoint/__init__.py
+++ b/python/paddle/incubate/checkpoint/__init__.py
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ...fluid.incubate.checkpoint import auto_checkpoint
+from ...fluid.incubate.checkpoint import auto_checkpoint # noqa: F401

-__all__ = ["auto_checkpoint"]
+__all__ = []
diff --git a/python/paddle/incubate/optimizer/__init__.py b/python/paddle/incubate/optimizer/__init__.py
index 4a3889d0ee..d966d187f2 100644
--- a/python/paddle/incubate/optimizer/__init__.py
+++ b/python/paddle/incubate/optimizer/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .lookahead import LookAhead
-from .modelaverage import ModelAverage
+from .lookahead import LookAhead # noqa: F401
+from .modelaverage import ModelAverage # noqa: F401

-__all__ = ['LookAhead', 'ModelAverage']
+__all__ = []
diff --git a/python/paddle/incubate/optimizer/lookahead.py b/python/paddle/incubate/optimizer/lookahead.py
index f90d520a5d..720a84a24f 100644
--- a/python/paddle/incubate/optimizer/lookahead.py
+++ b/python/paddle/incubate/optimizer/lookahead.py
@@ -20,7 +20,7 @@ import paddle
 import numpy as np
 from paddle.fluid.dygraph import base as imperative_base

-__all__ = ["LookAhead"]
+__all__ = []


 class LookAhead(Optimizer):
@@ -99,7 +99,7 @@ class LookAhead(Optimizer):
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            optimizer = paddle.optimizer.SGD(learning_rate=0.1, parameters=layer.parameters())
-           lookahead = paddle.incubate.optimizer.LookAhead(optimizer, alpha=0.2, k=5)
+           lookahead = paddle.incubate.LookAhead(optimizer, alpha=0.2, k=5)

            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
@@ -163,7 +163,7 @@ class LookAhead(Optimizer):
                out = linear(inp)
                loss = paddle.mean(out)
                sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-               lookahead = paddle.incubate.optimizer.LookAhead(sgd, alpha=0.2, k=5)
+               lookahead = paddle.incubate.LookAhead(sgd, alpha=0.2, k=5)
                loss.backward()
                lookahead.step()
                lookahead.clear_grad()
@@ -274,7 +274,7 @@ class LookAhead(Optimizer):
                out = linear(inp)
                loss = paddle.mean(out)
                sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-               lookahead = paddle.incubate.optimizer.LookAhead(sgd, alpha=0.2, k=5)
+               lookahead = paddle.incubate.LookAhead(sgd, alpha=0.2, k=5)
                loss.backward()
                lookahead.minimize(loss)
                lookahead.clear_grad()
@@ -282,9 +282,6 @@ class LookAhead(Optimizer):
         """
         assert isinstance(loss, Variable), "The loss should be an Tensor."

-        parameter_list = parameters if parameters \
-            else self._parameter_list
-
         # Apply inner optimizer to the main_program
         optimize_ops, params_grads = self.inner_optimizer.minimize(
             loss,
diff --git a/python/paddle/incubate/optimizer/modelaverage.py b/python/paddle/incubate/optimizer/modelaverage.py
index 8afcaf9207..8ffc3bdac6 100644
--- a/python/paddle/incubate/optimizer/modelaverage.py
+++ b/python/paddle/incubate/optimizer/modelaverage.py
@@ -21,7 +21,7 @@ import numpy as np
 from paddle.fluid.dygraph import base as imperative_base
 from paddle.fluid.wrapped_decorator import signature_safe_contextmanager

-__all__ = ["ModelAverage"]
+__all__ = []


 class ModelAverage(Optimizer):
@@ -129,7 +129,7 @@ class ModelAverage(Optimizer):
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            optimizer = opt.Momentum(learning_rate=0.2, momentum=0.1, parameters=layer.parameters())
-           model_average = paddle.incubate.optimizer.ModelAverage(0.15,
+           model_average = paddle.incubate.ModelAverage(0.15,
                                                        parameters=layer.parameters(),
                                                        min_average_window=2,
                                                        max_average_window=10)
@@ -313,7 +313,7 @@ class ModelAverage(Optimizer):
                sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
                sgd.minimize(loss)

-               modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+               modelaverage = paddle.incubate.ModelAverage(0.15,
                                                            parameters=linear.parameters(),
                                                            min_average_window=2,
                                                            max_average_window=4)
@@ -345,7 +345,7 @@ class ModelAverage(Optimizer):
                out = linear(inp)
                loss = paddle.mean(out)
                sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-               modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+               modelaverage = paddle.incubate.ModelAverage(0.15,
                                                            parameters=linear.parameters(),
                                                            min_average_window=2,
                                                            max_average_window=4)
@@ -395,7 +395,7 @@ class ModelAverage(Optimizer):

                sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())

-               modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+               modelaverage = paddle.incubate.ModelAverage(0.15,
                                                            parameters=linear.parameters(),
                                                            min_average_window=2,
                                                            max_average_window=4)
@@ -415,7 +415,6 @@ class ModelAverage(Optimizer):
                                                 param)
         old_num_accumulates = self._get_accumulator(
             'old_num_accumulates', param)
-        num_updates = self._get_accumulator('num_updates', param)
         sum_1 = self._get_accumulator('sum_1', param)
         sum_2 = self._get_accumulator('sum_2', param)
         sum_3 = self._get_accumulator('sum_3', param)
@@ -467,7 +466,7 @@ class ModelAverage(Optimizer):

                sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())

-               modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+               modelaverage = paddle.incubate.ModelAverage(0.15,
                                                            parameters=linear.parameters(),
                                                            min_average_window=2,
                                                            max_average_window=4)
@@ -506,17 +505,15 @@ class ModelAverage(Optimizer):
             self._get_accumulator('num_accumulates', param))
         old_num_accumulates = block._clone_variable(
             self._get_accumulator('old_num_accumulates', param))
-        num_updates = block._clone_variable(
-            self._get_accumulator('num_updates', param))
         # backup param value to grad
         layers.assign(input=param, output=grad)
         # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
         tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
         sum = layers.sum(x=[sum_1, sum_2, sum_3])
         tmp = layers.cast(
-            x=tmp, dtype='float32' if self._dtype == None else self._dtype)
+            x=tmp, dtype='float32' if self._dtype is None else self._dtype)
         sum = layers.cast(
-            x=sum, dtype='float32' if self._dtype == None else self._dtype)
+            x=sum, dtype='float32' if self._dtype is None else self._dtype)
         layers.ops._elementwise_div(x=sum, y=tmp, out=param)

     def _add_average_restore_op(self, block, param):
diff --git a/python/paddle/inference/__init__.py b/python/paddle/inference/__init__.py
index c388301ec3..4e17203971 100644
--- a/python/paddle/inference/__init__.py
+++ b/python/paddle/inference/__init__.py
@@ -12,5 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ..fluid.inference import Config, DataType, PlaceType, PrecisionType, Tensor, \
-    Predictor, create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool
+from ..fluid.inference import Config # noqa: F401
+from ..fluid.inference import DataType # noqa: F401
+from ..fluid.inference import PlaceType # noqa: F401
+from ..fluid.inference import PrecisionType # noqa: F401
+from ..fluid.inference import Tensor # noqa: F401
+from ..fluid.inference import Predictor # noqa: F401
+from ..fluid.inference import create_predictor # noqa: F401
+from ..fluid.inference import get_version # noqa: F401
+from ..fluid.inference import get_num_bytes_of_data_type # noqa: F401
+from ..fluid.inference import PredictorPool # noqa: F401
+
+__all__ = [ # noqa
+    'Config',
+    'DataType',
+    'PlaceType',
+    'PrecisionType',
+    'Tensor',
+    'Predictor',
+    'create_predictor',
+    'get_version',
+    'get_num_bytes_of_data_type',
+    'PredictorPool'
+]
diff --git a/python/paddle/jit/__init__.py b/python/paddle/jit/__init__.py
index 650837b2d7..576989e8e0 100644
--- a/python/paddle/jit/__init__.py
+++ b/python/paddle/jit/__init__.py
@@ -14,19 +14,26 @@

 from __future__ import print_function

-from ..fluid.dygraph.jit import save #DEFINE_ALIAS
-from ..fluid.dygraph.jit import load #DEFINE_ALIAS
-from ..fluid.dygraph.jit import TracedLayer #DEFINE_ALIAS
-from ..fluid.dygraph.jit import set_code_level #DEFINE_ALIAS
-from ..fluid.dygraph.jit import set_verbosity #DEFINE_ALIAS
-from ..fluid.dygraph.jit import declarative as to_static #DEFINE_ALIAS
-from ..fluid.dygraph.jit import not_to_static #DEFINE_ALIAS
-from ..fluid.dygraph import ProgramTranslator #DEFINE_ALIAS
-from ..fluid.dygraph.io import TranslatedLayer #DEFINE_ALIAS
+from ..fluid.dygraph.jit import save # noqa: F401
+from ..fluid.dygraph.jit import load # noqa: F401
+from ..fluid.dygraph.jit import TracedLayer # noqa: F401
+from ..fluid.dygraph.jit import set_code_level # noqa: F401
+from ..fluid.dygraph.jit import set_verbosity # noqa: F401
+from ..fluid.dygraph.jit import declarative as to_static # noqa: F401
+from ..fluid.dygraph.jit import not_to_static # noqa: F401
+from ..fluid.dygraph import ProgramTranslator # noqa: F401
+from ..fluid.dygraph.io import TranslatedLayer # noqa: F401

-from . import dy2static
+from . import dy2static # noqa: F401

-__all__ = [
-    'save', 'load', 'TracedLayer', 'to_static', 'ProgramTranslator',
-    'TranslatedLayer', 'set_code_level', 'set_verbosity', 'not_to_static'
+__all__ = [ # noqa
+    'save',
+    'load',
+    'TracedLayer',
+    'to_static',
+    'ProgramTranslator',
+    'TranslatedLayer',
+    'set_code_level',
+    'set_verbosity',
+    'not_to_static'
 ]
diff --git a/python/paddle/jit/dy2static/__init__.py b/python/paddle/jit/dy2static/__init__.py
index 239b554180..030d5499c2 100644
--- a/python/paddle/jit/dy2static/__init__.py
+++ b/python/paddle/jit/dy2static/__init__.py
@@ -12,18 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from __future__ import print_function
-
-from . import convert_operators
-from .convert_operators import *
-
-from . import convert_call_func
-from .convert_call_func import *
-
-from . import variable_trans_func
-from .variable_trans_func import *
+from .convert_call_func import convert_call # noqa: F401
+from .convert_operators import cast_bool_if_necessary # noqa: F401
+from .convert_operators import convert_assert # noqa: F401
+from .convert_operators import convert_ifelse # noqa: F401
+from .convert_operators import convert_len # noqa: F401
+from .convert_operators import convert_logical_and # noqa: F401
+from .convert_operators import convert_logical_not # noqa: F401
+from .convert_operators import convert_logical_or # noqa: F401
+from .convert_operators import convert_pop # noqa: F401
+from .convert_operators import convert_print # noqa: F401
+from .convert_operators import convert_shape_compare # noqa: F401
+from .convert_operators import convert_var_dtype # noqa: F401
+from .convert_operators import convert_var_shape # noqa: F401
+from .convert_operators import convert_var_shape_simple # noqa: F401
+from .convert_operators import eval_if_exist_else_none # noqa: F401
+from .convert_operators import choose_shape_attr_or_api # noqa: F401
+from .convert_operators import convert_while_loop # noqa: F401
+from .variable_trans_func import create_bool_as_type # noqa: F401
+from .variable_trans_func import create_fill_constant_node # noqa: F401
+from .variable_trans_func import create_static_variable_gast_node # noqa: F401
+from .variable_trans_func import data_layer_not_check # noqa: F401
+from .variable_trans_func import to_static_variable # noqa: F401
+from .variable_trans_func import to_static_variable_gast_node # noqa: F401

 __all__ = []
-__all__ += convert_operators.__all__
-__all__ += convert_call_func.__all__
-__all__ += variable_trans_func.__all__
diff --git a/python/paddle/jit/dy2static/convert_call_func.py b/python/paddle/jit/dy2static/convert_call_func.py
index be2377608e..4f6197a3cb 100644
--- a/python/paddle/jit/dy2static/convert_call_func.py
+++ b/python/paddle/jit/dy2static/convert_call_func.py
@@ -13,6 +13,6 @@
 # limitations under the License.
 from __future__ import print_function

-from ...fluid.dygraph.dygraph_to_static.convert_call_func import convert_call #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.convert_call_func import convert_call # noqa: F401

-__all__ = ['convert_call']
+__all__ = []
diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py
index 9321cf4a0b..8d67e06d9b 100644
--- a/python/paddle/jit/dy2static/convert_operators.py
+++ b/python/paddle/jit/dy2static/convert_operators.py
@@ -13,27 +13,21 @@
 # limitations under the License.

 from __future__ import print_function
-from ...fluid.dygraph.dygraph_to_static.convert_operators import cast_bool_if_necessary #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_assert #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_ifelse #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_len #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_and #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_not #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_or #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_pop #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_print #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape_compare #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_dtype #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape_simple #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import eval_if_exist_else_none #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import choose_shape_attr_or_api #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_while_loop #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.convert_operators import cast_bool_if_necessary # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_assert # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_ifelse # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_len # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_and # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_not # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_or # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_pop # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_print # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape_compare # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_dtype # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape_simple # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import eval_if_exist_else_none # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import choose_shape_attr_or_api # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_while_loop # noqa: F401

-__all__ = [
-    'cast_bool_if_necessary', 'convert_assert', 'convert_ifelse', 'convert_len',
-    'convert_logical_and', 'convert_logical_not', 'convert_logical_or',
-    'convert_pop', 'convert_print', 'convert_shape_compare',
-    'convert_var_dtype', 'convert_var_shape', 'convert_var_shape_simple',
-    'eval_if_exist_else_none', 'choose_shape_attr_or_api', 'convert_while_loop'
-]
+__all__ = []
diff --git a/python/paddle/jit/dy2static/variable_trans_func.py b/python/paddle/jit/dy2static/variable_trans_func.py
index 2deb1bbb0e..9ce2bc2da3 100644
--- a/python/paddle/jit/dy2static/variable_trans_func.py
+++ b/python/paddle/jit/dy2static/variable_trans_func.py
@@ -14,15 +14,11 @@

 from __future__ import print_function

-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_as_type #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import data_layer_not_check #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_as_type # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import data_layer_not_check # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node # noqa: F401

-__all__ = [
-    'create_bool_as_type', 'create_fill_constant_node',
-    'create_static_variable_gast_node', 'data_layer_not_check',
-    'to_static_variable', 'to_static_variable_gast_node'
-]
+__all__ = []
diff --git a/python/paddle/metric/__init__.py b/python/paddle/metric/__init__.py
index e41f6d76dd..2f2ef4c6f5 100644
--- a/python/paddle/metric/__init__.py
+++ b/python/paddle/metric/__init__.py
@@ -12,7 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .metrics import *
-from . import metrics
+from .metrics import Metric # noqa: F401
+from .metrics import Accuracy # noqa: F401
+from .metrics import Precision # noqa: F401
+from .metrics import Recall # noqa: F401
+from .metrics import Auc # noqa: F401
+from .metrics import accuracy # noqa: F401

-__all__ = metrics.__all__
+__all__ = [ #noqa
+    'Metric',
+    'Accuracy',
+    'Precision',
+    'Recall',
+    'Auc',
+    'accuracy'
+]
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index d8e400b08b..40758fb8dc 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -26,7 +26,7 @@ from ..fluid.layers.nn import topk
 from ..fluid.framework import core, _varbase_creator, in_dygraph_mode
 import paddle

-__all__ = ['Metric', 'Accuracy', 'Precision', 'Recall', 'Auc', 'accuracy']
+__all__ = []


 def _is_numpy_(var):
diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index 7cf3f94872..3ccb9e957f 100644
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -286,5 +286,6 @@ __all__ = [ #noqa
     'Swish',
     'PixelShuffle',
     'ELU',
-    'ReLU6'
+    'ReLU6',
+    'LayerDict'
 ]
diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py
index d4c17a27a6..ff18afa9d2 100644
--- a/python/paddle/nn/functional/__init__.py
+++ b/python/paddle/nn/functional/__init__.py
@@ -194,5 +194,6 @@ __all__ = [ #noqa
     'embedding',
     'gather_tree',
     'one_hot',
-    'normalize'
+    'normalize',
+    'temporal_shift'
 ]
diff --git a/python/paddle/onnx/__init__.py b/python/paddle/onnx/__init__.py
index 885d1968ce..8853e78bf3 100644
--- a/python/paddle/onnx/__init__.py
+++ b/python/paddle/onnx/__init__.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from __future__ import print_function
-from .export import export
+from .export import export # noqa: F401

 __all__ = ['export']
diff --git a/python/paddle/onnx/export.py b/python/paddle/onnx/export.py
index 4b99b42bb0..b8a217a513 100644
--- a/python/paddle/onnx/export.py
+++ b/python/paddle/onnx/export.py
@@ -15,7 +15,7 @@
 import os
 from paddle.utils import try_import

-__all__ = ['export']
+__all__ = []


 def export(layer, path, input_spec=None, opset_version=9, **configs):
diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py
index 688bff4a67..93394f9b5a 100644
--- a/python/paddle/static/__init__.py
+++ b/python/paddle/static/__init__.py
@@ -85,11 +85,21 @@ __all__ = [ #noqa
     'load',
     'save_inference_model',
     'load_inference_model',
+    'serialize_program',
+    'serialize_persistables',
+    'save_to_file',
+    'deserialize_program',
+    'deserialize_persistables',
+    'load_from_file',
     'normalize_program',
     'load_program_state',
     'set_program_state',
     'cpu_places',
     'cuda_places',
     'Variable',
-    'create_global_var'
+    'create_global_var',
+    'accuracy',
+    'auc',
+    'device_guard',
+    'create_parameter'
 ]
diff --git a/python/paddle/static/nn/__init__.py b/python/paddle/static/nn/__init__.py
index 416f6e4f3d..b589d9f878 100644
--- a/python/paddle/static/nn/__init__.py
+++ b/python/paddle/static/nn/__init__.py
@@ -68,7 +68,6 @@ __all__ = [ #noqa
     'conv2d_transpose',
     'conv3d',
     'conv3d_transpose',
-    'create_parameter',
     'crf_decoding',
     'data_norm',
     'deform_conv2d',
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index c8d80fc9bc..5aeae126d8 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -25,7 +25,6 @@ from .creation import ones_like # noqa: F401
 from .creation import zeros # noqa: F401
 from .creation import zeros_like # noqa: F401
 from .creation import arange # noqa: F401
-from .creation import eye # noqa: F401
 from .creation import full # noqa: F401
 from .creation import full_like # noqa: F401
 from .creation import triu # noqa: F401
@@ -82,7 +81,6 @@ from .manipulation import squeeze # noqa: F401
 from .manipulation import squeeze_ # noqa: F401
 from .manipulation import stack # noqa: F401
 from .manipulation import strided_slice # noqa: F401
-from .manipulation import transpose # noqa: F401
 from .manipulation import unique # noqa: F401
 from .manipulation import unsqueeze # noqa: F401
 from .manipulation import unsqueeze_ # noqa: F401
@@ -143,7 +141,6 @@ from .math import add # noqa: F401
 from .math import add_ # noqa: F401
 from .math import subtract # noqa: F401
 from .math import subtract_ # noqa: F401
-from .math import atan # noqa: F401
 from .math import logsumexp # noqa: F401
 from .math import inverse # noqa: F401
 from .math import log2 # noqa: F401
@@ -227,7 +224,6 @@ tensor_method_func = [ #noqa
     'log2',
     'log10',
     'logsumexp',
-    'mul',
     'multiplex',
     'pow',
     'prod',
-- 
GitLab
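
The housekeeping pattern this patch applies across all of these files is the same: each submodule imports the symbols it re-exports with a trailing `# noqa: F401` comment (so flake8 stops flagging the import as "unused"), then empties its local `__all__`, while the package-level `__init__.py` becomes the single place that curates the public `__all__` list. A minimal sketch of that layout, using a hypothetical two-file package `mypkg` rather than Paddle's actual tree:

    # mypkg/metrics.py -- implementation module; deliberately exports nothing itself
    __all__ = []  # the public surface is declared once, in mypkg/__init__.py


    def accuracy(predictions, labels):
        """Return the fraction of predictions that equal their labels."""
        if len(predictions) != len(labels):
            raise ValueError("predictions and labels must have the same length")
        hits = sum(1 for p, t in zip(predictions, labels) if p == t)
        return hits / len(predictions)


    # mypkg/__init__.py -- the only place the public API is spelled out
    from .metrics import accuracy  # noqa: F401  (re-export, not an unused import)

    __all__ = [  # noqa
        'accuracy',
    ]

With this layout `from mypkg import *` exposes exactly the curated names, `mypkg.metrics` contributes nothing to the star-import namespace on its own, and flake8 runs clean -- which is why the second commit bullet mentions fixing flake8 errors alongside the `__all__` rework.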