From 45f8b9d08511af08394e99d9901a5ef5bb8201fe Mon Sep 17 00:00:00 2001 From: zhiboniu <31800336+zhiboniu@users.noreply.github.com> Date: Fri, 11 Jun 2021 15:32:29 +0800 Subject: [PATCH] update 2.0 public api in vision (#33307) * update 2.0 public api in vision * fix some flake8 errors --- python/paddle/hapi/callbacks.py | 10 +-- python/paddle/hapi/model.py | 59 ++++++++------- python/paddle/metric/metrics.py | 2 +- python/paddle/tests/test_callback_visualdl.py | 2 +- python/paddle/vision/__init__.py | 63 ++++++++++++---- python/paddle/vision/datasets/__init__.py | 34 +++++---- python/paddle/vision/datasets/cifar.py | 2 +- python/paddle/vision/datasets/flowers.py | 2 +- python/paddle/vision/datasets/folder.py | 2 +- python/paddle/vision/datasets/mnist.py | 2 +- python/paddle/vision/datasets/voc2012.py | 2 +- python/paddle/vision/image.py | 2 +- python/paddle/vision/models/__init__.py | 50 +++++++++---- python/paddle/vision/models/lenet.py | 2 +- python/paddle/vision/models/mobilenetv1.py | 2 +- python/paddle/vision/models/mobilenetv2.py | 2 +- python/paddle/vision/models/resnet.py | 4 +- python/paddle/vision/models/vgg.py | 8 +- python/paddle/vision/ops.py | 8 +- python/paddle/vision/transforms/__init__.py | 73 +++++++++++++++++-- python/paddle/vision/transforms/functional.py | 6 +- .../vision/transforms/functional_cv2.py | 4 +- .../vision/transforms/functional_pil.py | 4 +- .../vision/transforms/functional_tensor.py | 2 + python/paddle/vision/transforms/transforms.py | 8 +- 25 files changed, 236 insertions(+), 119 deletions(-) diff --git a/python/paddle/hapi/callbacks.py b/python/paddle/hapi/callbacks.py index 61ae8b42d6..2bdde3879a 100644 --- a/python/paddle/hapi/callbacks.py +++ b/python/paddle/hapi/callbacks.py @@ -324,7 +324,7 @@ class ProgBarLogger(Callback): ]) train_dataset = MNIST(mode='train', transform=transform) - lenet = paddle.vision.LeNet() + lenet = paddle.vision.models.LeNet() model = paddle.Model(lenet, inputs, labels) @@ -554,7 +554,7 @@ class ModelCheckpoint(Callback): ]) train_dataset = MNIST(mode='train', transform=transform) - lenet = paddle.vision.LeNet() + lenet = paddle.vision.models.LeNet() model = paddle.Model(lenet, inputs, labels) @@ -614,7 +614,7 @@ class LRScheduler(Callback): ]) train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform) - lenet = paddle.vision.LeNet() + lenet = paddle.vision.models.LeNet() model = paddle.Model(lenet, inputs, labels) @@ -630,7 +630,7 @@ class LRScheduler(Callback): boundaries=boundaries, values=values) learning_rate = paddle.optimizer.lr.LinearWarmup( learning_rate=learning_rate, - warmup_steps=wamup_epochs, + warmup_steps=wamup_steps, start_lr=base_lr / 5., end_lr=base_lr, verbose=True) @@ -856,7 +856,7 @@ class VisualDL(Callback): train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform) eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform) - net = paddle.vision.LeNet() + net = paddle.vision.models.LeNet() model = paddle.Model(net, inputs, labels) optim = paddle.optimizer.Adam(0.001, parameters=net.parameters()) diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 160d6c5475..1c76c9174f 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -30,20 +30,28 @@ from collections import Iterable import paddle from paddle import fluid from paddle.fluid import core -from paddle.fluid.framework import in_dygraph_mode, Variable, ParamBase, _current_expected_place -from paddle.fluid.framework import in_dygraph_mode, 
Variable, _get_paddle_place +from paddle.fluid.framework import in_dygraph_mode +from paddle.fluid.framework import Variable +from paddle.fluid.framework import ParamBase +from paddle.fluid.framework import _current_expected_place +from paddle.fluid.framework import _get_paddle_place from paddle.fluid.framework import _current_expected_place as _get_device from paddle.fluid.executor import global_scope from paddle.fluid.io import is_belong_to_optimizer from paddle.fluid.dygraph.base import to_variable from paddle.fluid.dygraph.parallel import ParallelEnv -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, FunctionSpec -from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator +from paddle.fluid.dygraph.dygraph_to_static.program_translator import FunctionSpec +from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX +from paddle.fluid.dygraph.io import INFER_PARAMS_SUFFIX from paddle.fluid.layers.utils import flatten from paddle.fluid.layers import collective -from paddle.io import DataLoader, Dataset, DistributedBatchSampler -from paddle.fluid.executor import scope_guard, Executor +from paddle.io import DataLoader +from paddle.io import Dataset +from paddle.io import DistributedBatchSampler +from paddle.fluid.executor import scope_guard +from paddle.fluid.executor import Executor from paddle.fluid.dygraph.layers import Layer from paddle.metric import Metric from paddle.static import InputSpec as Input @@ -166,7 +174,6 @@ def init_communicator(program, rank, nranks, wait_port, current_endpoint, name=unique_name.generate('hccl_id'), persistable=True, type=core.VarDesc.VarType.RAW) - endpoint_to_index_map = {e: idx for idx, e in enumerate(endpoints)} block.append_op( type='c_gen_hccl_id', inputs={}, @@ -710,10 +717,10 @@ class DynamicGraphAdapter(object): enable=self._amp_level != 'O0', **self._amp_custom_lists): if self._nranks > 1: outputs = self.ddp_model.forward( - * [to_variable(x) for x in inputs]) + *[to_variable(x) for x in inputs]) else: outputs = self.model.network.forward( - * [to_variable(x) for x in inputs]) + *[to_variable(x) for x in inputs]) losses = self.model._loss(*(to_list(outputs) + labels)) losses = to_list(losses) @@ -732,7 +739,7 @@ class DynamicGraphAdapter(object): metrics = [] for metric in self.model._metrics: metric_outs = metric.compute(*(to_list(outputs) + labels)) - m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)]) + m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)]) metrics.append(m) return ([to_numpy(l) for l in losses], metrics) \ @@ -746,7 +753,7 @@ class DynamicGraphAdapter(object): labels = labels or [] labels = [to_variable(l) for l in to_list(labels)] - outputs = self.model.network.forward(* [to_variable(x) for x in inputs]) + outputs = self.model.network.forward(*[to_variable(x) for x in inputs]) if self.model._loss: losses = self.model._loss(*(to_list(outputs) + labels)) losses = to_list(losses) @@ -777,7 +784,7 @@ class DynamicGraphAdapter(object): self._merge_count[self.mode + '_batch'] = samples metric_outs = metric.compute(*(to_list(outputs) + labels)) - m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)]) + m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)]) metrics.append(m) if self.model._loss and len(metrics): @@ -1363,8 +1370,9 @@ class Model(object): # pure float16 training has some restricts now if self._adapter._amp_level == "O2": if 
in_dygraph_mode(): - warnings.warn("Pure float16 training is not supported in dygraph mode now, "\ - "and it will be supported in future version.") + warnings.warn( + "Pure float16 training is not supported in dygraph mode now, and it will be supported in future version." + ) else: # grad clip is not supported in pure fp16 training now assert self._optimizer._grad_clip is None, \ @@ -1398,8 +1406,7 @@ class Model(object): if 'use_pure_fp16' in amp_configs: raise ValueError( - "''use_pure_fp16' is an invalid parameter, " - "the level of mixed precision training only depends on 'O1' or 'O2'." + "'use_pure_fp16' is an invalid parameter, the level of mixed precision training only depends on 'O1' or 'O2'." ) _check_pure_fp16_configs() @@ -1427,9 +1434,8 @@ class Model(object): } if amp_config_key_set - accepted_param_set: raise ValueError( - "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, " - "but {} could not be recognized.".format( - tuple(amp_config_key_set - accepted_param_set))) + "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, but {} could not be recognized.". + format(tuple(amp_config_key_set - accepted_param_set))) if 'use_fp16_guard' in amp_config_key_set: if in_dygraph_mode(): @@ -1501,8 +1507,9 @@ class Model(object): self._optimizer = optimizer if loss is not None: if not isinstance(loss, paddle.nn.Layer) and not callable(loss): - raise TypeError("'loss' must be sub classes of " \ - "`paddle.nn.Layer` or any callable function.") + raise TypeError( + "'loss' must be sub classes of `paddle.nn.Layer` or any callable function." + ) self._loss = loss metrics = metrics or [] @@ -2080,7 +2087,7 @@ class Model(object): input = InputSpec([None, 1, 28, 28], 'float32', 'image') label = InputSpec([None, 1], 'int64', 'label') - model = paddle.Model(paddle.vision.LeNet(), + model = paddle.Model(paddle.vision.models.LeNet(), input, label) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) @@ -2122,9 +2129,11 @@ class Model(object): else: out_specs = to_list(specs) elif isinstance(specs, dict): - assert is_input == False - out_specs = [specs[n] \ - for n in extract_args(self.network.forward) if n != 'self'] + assert is_input is False + out_specs = [ + specs[n] for n in extract_args(self.network.forward) + if n != 'self' + ] else: out_specs = to_list(specs) # Note: checks each element has specificed `name`. 
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index 61d1eb0e37..d8e400b08b 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -222,7 +222,7 @@ class Accuracy(Metric): transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) train_dataset = MNIST(mode='train', transform=transform) - model = paddle.Model(paddle.vision.LeNet(), input, label) + model = paddle.Model(paddle.vision.models.LeNet(), input, label) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) model.prepare( diff --git a/python/paddle/tests/test_callback_visualdl.py b/python/paddle/tests/test_callback_visualdl.py index 3631618310..db3b83f2b1 100644 --- a/python/paddle/tests/test_callback_visualdl.py +++ b/python/paddle/tests/test_callback_visualdl.py @@ -55,7 +55,7 @@ class TestCallbacks(unittest.TestCase): train_dataset = MnistDataset(mode='train', transform=transform) eval_dataset = MnistDataset(mode='test', transform=transform) - net = paddle.vision.LeNet() + net = paddle.vision.models.LeNet() model = paddle.Model(net, inputs, labels) optim = paddle.optimizer.Adam(0.001, parameters=net.parameters()) diff --git a/python/paddle/vision/__init__.py b/python/paddle/vision/__init__.py index aeb07bf281..79fb7844dd 100644 --- a/python/paddle/vision/__init__.py +++ b/python/paddle/vision/__init__.py @@ -11,22 +11,59 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import paddle +import paddle.nn as nn +from . import models # noqa: F401 +from . import transforms # noqa: F401 +from . import datasets # noqa: F401 +from . import ops # noqa: F401 +from .image import set_image_backend # noqa: F401 +from .image import get_image_backend # noqa: F401 +from .image import image_load # noqa: F401 +from .models import LeNet as models_LeNet +import paddle.utils.deprecated as deprecated -from . import models -from .models import * +__all__ = [ #noqa + 'set_image_backend', 'get_image_backend', 'image_load' +] -from . import transforms -from .transforms import * -from . import datasets -from .datasets import * +class LeNet(models_LeNet): + """LeNet model from + `"LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.`_ -from . import image -from .image import * + Args: + num_classes (int): output dim of last fc layer. If num_classes <=0, last fc layer + will not be defined. Default: 10. -from . import ops + Examples: + .. 
code-block:: python -__all__ = models.__all__ \ - + transforms.__all__ \ - + datasets.__all__ \ - + image.__all__ + from paddle.vision.models import LeNet + + model = LeNet() + """ + + @deprecated( + since="2.0.0", + update_to="paddle.vision.models.LeNet", + level=1, + reason="Please use new API in models, paddle.vision.LeNet will be removed in future" + ) + def __init__(self, num_classes=10): + super(LeNet, self).__init__(num_classes=10) + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2D( + 1, 6, 3, stride=1, padding=1), + nn.ReLU(), + nn.MaxPool2D(2, 2), + nn.Conv2D( + 6, 16, 5, stride=1, padding=0), + nn.ReLU(), + nn.MaxPool2D(2, 2)) + + if num_classes > 0: + self.fc = nn.Sequential( + nn.Linear(400, 120), + nn.Linear(120, 84), nn.Linear(84, num_classes)) diff --git a/python/paddle/vision/datasets/__init__.py b/python/paddle/vision/datasets/__init__.py index 6703aa4197..3ee7503e27 100644 --- a/python/paddle/vision/datasets/__init__.py +++ b/python/paddle/vision/datasets/__init__.py @@ -12,20 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import folder -from . import mnist -from . import flowers -from . import cifar -from . import voc2012 +from .folder import DatasetFolder # noqa: F401 +from .folder import ImageFolder # noqa: F401 +from .mnist import MNIST # noqa: F401 +from .mnist import FashionMNIST # noqa: F401 +from .flowers import Flowers # noqa: F401 +from .cifar import Cifar10 # noqa: F401 +from .cifar import Cifar100 # noqa: F401 +from .voc2012 import VOC2012 # noqa: F401 -from .folder import * -from .mnist import * -from .flowers import * -from .cifar import * -from .voc2012 import * - -__all__ = folder.__all__ \ - + mnist.__all__ \ - + flowers.__all__ \ - + cifar.__all__ \ - + voc2012.__all__ +__all__ = [ #noqa + 'DatasetFolder' + 'ImageFolder', + 'MNIST', + 'FashionMNIST', + 'Flowers', + 'Cifar10', + 'Cifar100', + 'VOC2012' +] diff --git a/python/paddle/vision/datasets/cifar.py b/python/paddle/vision/datasets/cifar.py index 0a0a48026a..2a582d7d0a 100644 --- a/python/paddle/vision/datasets/cifar.py +++ b/python/paddle/vision/datasets/cifar.py @@ -24,7 +24,7 @@ import paddle from paddle.io import Dataset from paddle.dataset.common import _check_exists_and_download -__all__ = ['Cifar10', 'Cifar100'] +__all__ = [] URL_PREFIX = 'https://dataset.bj.bcebos.com/cifar/' CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz' diff --git a/python/paddle/vision/datasets/flowers.py b/python/paddle/vision/datasets/flowers.py index 448d6efb52..11b781b7a6 100644 --- a/python/paddle/vision/datasets/flowers.py +++ b/python/paddle/vision/datasets/flowers.py @@ -25,7 +25,7 @@ from paddle.io import Dataset from paddle.utils import try_import from paddle.dataset.common import _check_exists_and_download -__all__ = ["Flowers"] +__all__ = [] DATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz' LABEL_URL = 'http://paddlemodels.bj.bcebos.com/flowers/imagelabels.mat' diff --git a/python/paddle/vision/datasets/folder.py b/python/paddle/vision/datasets/folder.py index 718af04130..220b3d8ecb 100644 --- a/python/paddle/vision/datasets/folder.py +++ b/python/paddle/vision/datasets/folder.py @@ -20,7 +20,7 @@ import paddle from paddle.io import Dataset from paddle.utils import try_import -__all__ = ["DatasetFolder", "ImageFolder"] +__all__ = [] def has_valid_extension(filename, extensions): diff --git a/python/paddle/vision/datasets/mnist.py b/python/paddle/vision/datasets/mnist.py index 
1b998fd71a..84760f9598 100644 --- a/python/paddle/vision/datasets/mnist.py +++ b/python/paddle/vision/datasets/mnist.py @@ -24,7 +24,7 @@ import paddle from paddle.io import Dataset from paddle.dataset.common import _check_exists_and_download -__all__ = ["MNIST", "FashionMNIST"] +__all__ = [] class MNIST(Dataset): diff --git a/python/paddle/vision/datasets/voc2012.py b/python/paddle/vision/datasets/voc2012.py index 1a42d143f0..5a82d7864c 100644 --- a/python/paddle/vision/datasets/voc2012.py +++ b/python/paddle/vision/datasets/voc2012.py @@ -23,7 +23,7 @@ import paddle from paddle.io import Dataset from paddle.dataset.common import _check_exists_and_download -__all__ = ["VOC2012"] +__all__ = [] VOC_URL = 'https://dataset.bj.bcebos.com/voc/VOCtrainval_11-May-2012.tar' diff --git a/python/paddle/vision/image.py b/python/paddle/vision/image.py index 19986816b7..5c260b1d90 100644 --- a/python/paddle/vision/image.py +++ b/python/paddle/vision/image.py @@ -15,7 +15,7 @@ from PIL import Image from paddle.utils import try_import -__all__ = ['set_image_backend', 'get_image_backend', 'image_load'] +__all__ = [] _image_backend = 'pil' diff --git a/python/paddle/vision/models/__init__.py b/python/paddle/vision/models/__init__.py index 60d8c246ae..d38f3b1722 100644 --- a/python/paddle/vision/models/__init__.py +++ b/python/paddle/vision/models/__init__.py @@ -12,20 +12,38 @@ #See the License for the specific language governing permissions and #limitations under the License. -from . import resnet -from . import vgg -from . import mobilenetv1 -from . import mobilenetv2 -from . import lenet +from .resnet import ResNet # noqa: F401 +from .resnet import resnet18 # noqa: F401 +from .resnet import resnet34 # noqa: F401 +from .resnet import resnet50 # noqa: F401 +from .resnet import resnet101 # noqa: F401 +from .resnet import resnet152 # noqa: F401 +from .mobilenetv1 import MobileNetV1 # noqa: F401 +from .mobilenetv1 import mobilenet_v1 # noqa: F401 +from .mobilenetv2 import MobileNetV2 # noqa: F401 +from .mobilenetv2 import mobilenet_v2 # noqa: F401 +from .vgg import VGG # noqa: F401 +from .vgg import vgg11 # noqa: F401 +from .vgg import vgg13 # noqa: F401 +from .vgg import vgg16 # noqa: F401 +from .vgg import vgg19 # noqa: F401 +from .lenet import LeNet # noqa: F401 -from .resnet import * -from .mobilenetv1 import * -from .mobilenetv2 import * -from .vgg import * -from .lenet import * - -__all__ = resnet.__all__ \ - + vgg.__all__ \ - + mobilenetv1.__all__ \ - + mobilenetv2.__all__ \ - + lenet.__all__ +__all__ = [ #noqa + 'ResNet', + 'resnet18', + 'resnet34', + 'resnet50', + 'resnet101', + 'resnet152', + 'VGG', + 'vgg11', + 'vgg13', + 'vgg16', + 'vgg19', + 'MobileNetV1', + 'mobilenet_v1', + 'MobileNetV2', + 'mobilenet_v2', + 'LeNet' +] diff --git a/python/paddle/vision/models/lenet.py b/python/paddle/vision/models/lenet.py index 2fb50fc17b..46212f46f3 100644 --- a/python/paddle/vision/models/lenet.py +++ b/python/paddle/vision/models/lenet.py @@ -15,7 +15,7 @@ import paddle import paddle.nn as nn -__all__ = ['LeNet'] +__all__ = [] class LeNet(nn.Layer): diff --git a/python/paddle/vision/models/mobilenetv1.py b/python/paddle/vision/models/mobilenetv1.py index 22d177248e..671a2cd8df 100644 --- a/python/paddle/vision/models/mobilenetv1.py +++ b/python/paddle/vision/models/mobilenetv1.py @@ -17,7 +17,7 @@ import paddle.nn as nn from paddle.utils.download import get_weights_path_from_url -__all__ = ['MobileNetV1', 'mobilenet_v1'] +__all__ = [] model_urls = { 'mobilenetv1_1.0': diff --git 
a/python/paddle/vision/models/mobilenetv2.py b/python/paddle/vision/models/mobilenetv2.py index f1cbaab1f9..74071fc121 100644 --- a/python/paddle/vision/models/mobilenetv2.py +++ b/python/paddle/vision/models/mobilenetv2.py @@ -20,7 +20,7 @@ import paddle.nn.functional as F from paddle.utils.download import get_weights_path_from_url -__all__ = ['MobileNetV2', 'mobilenet_v2'] +__all__ = [] model_urls = { 'mobilenetv2_1.0': diff --git a/python/paddle/vision/models/resnet.py b/python/paddle/vision/models/resnet.py index 1f44e0bc6d..5be69c93e8 100644 --- a/python/paddle/vision/models/resnet.py +++ b/python/paddle/vision/models/resnet.py @@ -20,9 +20,7 @@ import paddle.nn as nn from paddle.utils.download import get_weights_path_from_url -__all__ = [ - 'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152' -] +__all__ = [] model_urls = { 'resnet18': ('https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams', diff --git a/python/paddle/vision/models/vgg.py b/python/paddle/vision/models/vgg.py index f6b4c75e84..d526de8208 100644 --- a/python/paddle/vision/models/vgg.py +++ b/python/paddle/vision/models/vgg.py @@ -17,13 +17,7 @@ import paddle.nn as nn from paddle.utils.download import get_weights_path_from_url -__all__ = [ - 'VGG', - 'vgg11', - 'vgg13', - 'vgg16', - 'vgg19', -] +__all__ = [] model_urls = { 'vgg16': ('https://paddle-hapi.bj.bcebos.com/models/vgg16.pdparams', diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py index 60a7a90c9b..53beedb885 100644 --- a/python/paddle/vision/ops.py +++ b/python/paddle/vision/ops.py @@ -22,8 +22,12 @@ from ..fluid.initializer import Normal from paddle.common_ops_import import * -__all__ = [ - 'yolo_loss', 'yolo_box', 'deform_conv2d', 'DeformConv2D', 'read_file', +__all__ = [ #noqa + 'yolo_loss', + 'yolo_box', + 'deform_conv2d', + 'DeformConv2D', + 'read_file', 'decode_jpeg' ] diff --git a/python/paddle/vision/transforms/__init__.py b/python/paddle/vision/transforms/__init__.py index f7c5b63b19..413f09f786 100644 --- a/python/paddle/vision/transforms/__init__.py +++ b/python/paddle/vision/transforms/__init__.py @@ -12,11 +12,70 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import transforms -from . 
import functional +from .transforms import BaseTransform # noqa: F401 +from .transforms import Compose # noqa: F401 +from .transforms import Resize # noqa: F401 +from .transforms import RandomResizedCrop # noqa: F401 +from .transforms import CenterCrop # noqa: F401 +from .transforms import RandomHorizontalFlip # noqa: F401 +from .transforms import RandomVerticalFlip # noqa: F401 +from .transforms import Transpose # noqa: F401 +from .transforms import Normalize # noqa: F401 +from .transforms import BrightnessTransform # noqa: F401 +from .transforms import SaturationTransform # noqa: F401 +from .transforms import ContrastTransform # noqa: F401 +from .transforms import HueTransform # noqa: F401 +from .transforms import ColorJitter # noqa: F401 +from .transforms import RandomCrop # noqa: F401 +from .transforms import Pad # noqa: F401 +from .transforms import RandomRotation # noqa: F401 +from .transforms import Grayscale # noqa: F401 +from .transforms import ToTensor # noqa: F401 +from .functional import to_tensor # noqa: F401 +from .functional import hflip # noqa: F401 +from .functional import vflip # noqa: F401 +from .functional import resize # noqa: F401 +from .functional import pad # noqa: F401 +from .functional import rotate # noqa: F401 +from .functional import to_grayscale # noqa: F401 +from .functional import crop # noqa: F401 +from .functional import center_crop # noqa: F401 +from .functional import adjust_brightness # noqa: F401 +from .functional import adjust_contrast # noqa: F401 +from .functional import adjust_hue # noqa: F401 +from .functional import normalize # noqa: F401 -from .transforms import * -from .functional import * - -__all__ = transforms.__all__ \ - + functional.__all__ +__all__ = [ #noqa + 'BaseTransform', + 'Compose', + 'Resize', + 'RandomResizedCrop', + 'CenterCrop', + 'RandomHorizontalFlip', + 'RandomVerticalFlip', + 'Transpose', + 'Normalize', + 'BrightnessTransform', + 'SaturationTransform', + 'ContrastTransform', + 'HueTransform', + 'ColorJitter', + 'RandomCrop', + 'Pad', + 'RandomRotation', + 'Grayscale', + 'ToTensor', + 'to_tensor', + 'hflip', + 'vflip', + 'resize', + 'pad', + 'rotate', + 'to_grayscale', + 'crop', + 'center_crop', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'normalize' +] diff --git a/python/paddle/vision/transforms/functional.py b/python/paddle/vision/transforms/functional.py index 18a35915c9..3087d5c3ed 100644 --- a/python/paddle/vision/transforms/functional.py +++ b/python/paddle/vision/transforms/functional.py @@ -29,11 +29,7 @@ from . import functional_pil as F_pil from . import functional_cv2 as F_cv2 from . import functional_tensor as F_t -__all__ = [ - 'to_tensor', 'hflip', 'vflip', 'resize', 'pad', 'rotate', 'to_grayscale', - 'crop', 'center_crop', 'adjust_brightness', 'adjust_contrast', 'adjust_hue', - 'normalize' -] +__all__ = [] def _is_pil_image(img): diff --git a/python/paddle/vision/transforms/functional_cv2.py b/python/paddle/vision/transforms/functional_cv2.py index 99cbfd6dc4..487d79d276 100644 --- a/python/paddle/vision/transforms/functional_cv2.py +++ b/python/paddle/vision/transforms/functional_cv2.py @@ -33,6 +33,8 @@ else: Sequence = collections.abc.Sequence Iterable = collections.abc.Iterable +__all__ = [] + def to_tensor(pic, data_format='CHW'): """Converts a ``numpy.ndarray`` to paddle.Tensor. @@ -49,7 +51,7 @@ def to_tensor(pic, data_format='CHW'): """ - if not data_format in ['CHW', 'HWC']: + if data_format not in ['CHW', 'HWC']: raise ValueError('data_format should be CHW or HWC. 
Got {}'.format( data_format)) diff --git a/python/paddle/vision/transforms/functional_pil.py b/python/paddle/vision/transforms/functional_pil.py index eee60c5452..ae6d0cc45a 100644 --- a/python/paddle/vision/transforms/functional_pil.py +++ b/python/paddle/vision/transforms/functional_pil.py @@ -41,6 +41,8 @@ _pil_interp_from_str = { 'hamming': Image.HAMMING } +__all__ = [] + def to_tensor(pic, data_format='CHW'): """Converts a ``PIL.Image`` to paddle.Tensor. @@ -57,7 +59,7 @@ def to_tensor(pic, data_format='CHW'): """ - if not data_format in ['CHW', 'HWC']: + if data_format not in ['CHW', 'HWC']: raise ValueError('data_format should be CHW or HWC. Got {}'.format( data_format)) diff --git a/python/paddle/vision/transforms/functional_tensor.py b/python/paddle/vision/transforms/functional_tensor.py index 7f490d5791..1ec6741699 100644 --- a/python/paddle/vision/transforms/functional_tensor.py +++ b/python/paddle/vision/transforms/functional_tensor.py @@ -23,6 +23,8 @@ import paddle.nn.functional as F import sys import collections +__all__ = [] + def _assert_image_tensor(img, data_format): if not isinstance( diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py index 00e12689c4..8a35e6c3b9 100644 --- a/python/paddle/vision/transforms/transforms.py +++ b/python/paddle/vision/transforms/transforms.py @@ -35,13 +35,7 @@ else: Sequence = collections.abc.Sequence Iterable = collections.abc.Iterable -__all__ = [ - "BaseTransform", "Compose", "Resize", "RandomResizedCrop", "CenterCrop", - "RandomHorizontalFlip", "RandomVerticalFlip", "Transpose", "Normalize", - "BrightnessTransform", "SaturationTransform", "ContrastTransform", - "HueTransform", "ColorJitter", "RandomCrop", "Pad", "RandomRotation", - "Grayscale", "ToTensor" -] +__all__ = [] def _get_image_size(img): -- GitLab
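The patch replaces the wildcard exports under paddle.vision with explicit per-name imports, so callers are expected to reach models through paddle.vision.models rather than the old top-level alias. A minimal usage sketch after this change (assuming a Paddle 2.x install; the dataset, batch size, and epoch count are illustrative only), modeled on the docstring examples updated above:

.. code-block:: python

    import paddle
    import paddle.vision.transforms as T
    from paddle.vision.datasets import MNIST
    from paddle.vision.models import LeNet   # preferred over the deprecated paddle.vision.LeNet
    from paddle.static import InputSpec

    # same preprocessing as in the updated callbacks/metrics docstrings
    transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
    train_dataset = MNIST(mode='train', transform=transform)

    inputs = InputSpec([None, 1, 28, 28], 'float32', 'image')
    labels = InputSpec([None, 1], 'int64', 'label')

    model = paddle.Model(LeNet(), inputs, labels)
    optim = paddle.optimizer.Adam(learning_rate=0.001,
                                  parameters=model.parameters())
    model.prepare(optim, paddle.nn.CrossEntropyLoss(), paddle.metric.Accuracy())
    model.fit(train_dataset, batch_size=64, epochs=1)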
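The mechanical pattern applied across the vision modules is the same everywhere: each implementation module empties its ``__all__``, and the package ``__init__.py`` re-exports the public names with ``# noqa: F401`` plus a single explicit ``__all__`` list. A hedged sketch of that layout, using a hypothetical ``mypkg`` package rather than the real paddle files:

.. code-block:: python

    # mypkg/lenet.py -- implementation module; nothing is published from here
    __all__ = []

    class LeNet:
        """The class is defined here, but the public name is declared by the package."""
        pass

.. code-block:: python

    # mypkg/__init__.py -- the only place the public API is declared
    from .lenet import LeNet  # noqa: F401  (silence flake8's unused-import warning)

    __all__ = [  # noqa
        'LeNet',
    ]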