diff --git a/python/paddle/hapi/callbacks.py b/python/paddle/hapi/callbacks.py index 61ae8b42d63a909cc4dc88d4b16f0b0e8ed83c71..2bdde3879a2db2bc2487cbf27811c1fda442ced4 100644 --- a/python/paddle/hapi/callbacks.py +++ b/python/paddle/hapi/callbacks.py @@ -324,7 +324,7 @@ class ProgBarLogger(Callback): ]) train_dataset = MNIST(mode='train', transform=transform) - lenet = paddle.vision.LeNet() + lenet = paddle.vision.models.LeNet() model = paddle.Model(lenet, inputs, labels) @@ -554,7 +554,7 @@ class ModelCheckpoint(Callback): ]) train_dataset = MNIST(mode='train', transform=transform) - lenet = paddle.vision.LeNet() + lenet = paddle.vision.models.LeNet() model = paddle.Model(lenet, inputs, labels) @@ -614,7 +614,7 @@ class LRScheduler(Callback): ]) train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform) - lenet = paddle.vision.LeNet() + lenet = paddle.vision.models.LeNet() model = paddle.Model(lenet, inputs, labels) @@ -630,7 +630,7 @@ class LRScheduler(Callback): boundaries=boundaries, values=values) learning_rate = paddle.optimizer.lr.LinearWarmup( learning_rate=learning_rate, - warmup_steps=wamup_epochs, + warmup_steps=wamup_steps, start_lr=base_lr / 5., end_lr=base_lr, verbose=True) @@ -856,7 +856,7 @@ class VisualDL(Callback): train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform) eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform) - net = paddle.vision.LeNet() + net = paddle.vision.models.LeNet() model = paddle.Model(net, inputs, labels) optim = paddle.optimizer.Adam(0.001, parameters=net.parameters()) diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 160d6c54759d901e2529221c99dce63b29f06810..1c76c9174fd6989d34619c51d366c709efc67835 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -30,20 +30,28 @@ from collections import Iterable import paddle from paddle import fluid from paddle.fluid import core -from paddle.fluid.framework import 
in_dygraph_mode, Variable, ParamBase, _current_expected_place -from paddle.fluid.framework import in_dygraph_mode, Variable, _get_paddle_place +from paddle.fluid.framework import in_dygraph_mode +from paddle.fluid.framework import Variable +from paddle.fluid.framework import ParamBase +from paddle.fluid.framework import _current_expected_place +from paddle.fluid.framework import _get_paddle_place from paddle.fluid.framework import _current_expected_place as _get_device from paddle.fluid.executor import global_scope from paddle.fluid.io import is_belong_to_optimizer from paddle.fluid.dygraph.base import to_variable from paddle.fluid.dygraph.parallel import ParallelEnv -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, FunctionSpec -from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator +from paddle.fluid.dygraph.dygraph_to_static.program_translator import FunctionSpec +from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX +from paddle.fluid.dygraph.io import INFER_PARAMS_SUFFIX from paddle.fluid.layers.utils import flatten from paddle.fluid.layers import collective -from paddle.io import DataLoader, Dataset, DistributedBatchSampler -from paddle.fluid.executor import scope_guard, Executor +from paddle.io import DataLoader +from paddle.io import Dataset +from paddle.io import DistributedBatchSampler +from paddle.fluid.executor import scope_guard +from paddle.fluid.executor import Executor from paddle.fluid.dygraph.layers import Layer from paddle.metric import Metric from paddle.static import InputSpec as Input @@ -166,7 +174,6 @@ def init_communicator(program, rank, nranks, wait_port, current_endpoint, name=unique_name.generate('hccl_id'), persistable=True, type=core.VarDesc.VarType.RAW) - endpoint_to_index_map = {e: idx for idx, e in enumerate(endpoints)} block.append_op( type='c_gen_hccl_id', inputs={}, @@ -710,10 
+717,10 @@ class DynamicGraphAdapter(object): enable=self._amp_level != 'O0', **self._amp_custom_lists): if self._nranks > 1: outputs = self.ddp_model.forward( - * [to_variable(x) for x in inputs]) + *[to_variable(x) for x in inputs]) else: outputs = self.model.network.forward( - * [to_variable(x) for x in inputs]) + *[to_variable(x) for x in inputs]) losses = self.model._loss(*(to_list(outputs) + labels)) losses = to_list(losses) @@ -732,7 +739,7 @@ class DynamicGraphAdapter(object): metrics = [] for metric in self.model._metrics: metric_outs = metric.compute(*(to_list(outputs) + labels)) - m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)]) + m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)]) metrics.append(m) return ([to_numpy(l) for l in losses], metrics) \ @@ -746,7 +753,7 @@ class DynamicGraphAdapter(object): labels = labels or [] labels = [to_variable(l) for l in to_list(labels)] - outputs = self.model.network.forward(* [to_variable(x) for x in inputs]) + outputs = self.model.network.forward(*[to_variable(x) for x in inputs]) if self.model._loss: losses = self.model._loss(*(to_list(outputs) + labels)) losses = to_list(losses) @@ -777,7 +784,7 @@ class DynamicGraphAdapter(object): self._merge_count[self.mode + '_batch'] = samples metric_outs = metric.compute(*(to_list(outputs) + labels)) - m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)]) + m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)]) metrics.append(m) if self.model._loss and len(metrics): @@ -1363,8 +1370,9 @@ class Model(object): # pure float16 training has some restricts now if self._adapter._amp_level == "O2": if in_dygraph_mode(): - warnings.warn("Pure float16 training is not supported in dygraph mode now, "\ - "and it will be supported in future version.") + warnings.warn( + "Pure float16 training is not supported in dygraph mode now, and it will be supported in future version." 
+ ) else: # grad clip is not supported in pure fp16 training now assert self._optimizer._grad_clip is None, \ @@ -1398,8 +1406,7 @@ class Model(object): if 'use_pure_fp16' in amp_configs: raise ValueError( - "''use_pure_fp16' is an invalid parameter, " - "the level of mixed precision training only depends on 'O1' or 'O2'." + "'use_pure_fp16' is an invalid parameter, the level of mixed precision training only depends on 'O1' or 'O2'." ) _check_pure_fp16_configs() @@ -1427,9 +1434,8 @@ class Model(object): } if amp_config_key_set - accepted_param_set: raise ValueError( - "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, " - "but {} could not be recognized.".format( - tuple(amp_config_key_set - accepted_param_set))) + "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, but {} could not be recognized.". + format(tuple(amp_config_key_set - accepted_param_set))) if 'use_fp16_guard' in amp_config_key_set: if in_dygraph_mode(): @@ -1501,8 +1507,9 @@ class Model(object): self._optimizer = optimizer if loss is not None: if not isinstance(loss, paddle.nn.Layer) and not callable(loss): - raise TypeError("'loss' must be sub classes of " \ - "`paddle.nn.Layer` or any callable function.") + raise TypeError( + "'loss' must be sub classes of `paddle.nn.Layer` or any callable function." 
+ ) self._loss = loss metrics = metrics or [] @@ -2080,7 +2087,7 @@ class Model(object): input = InputSpec([None, 1, 28, 28], 'float32', 'image') label = InputSpec([None, 1], 'int64', 'label') - model = paddle.Model(paddle.vision.LeNet(), + model = paddle.Model(paddle.vision.models.LeNet(), input, label) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) @@ -2122,9 +2129,11 @@ class Model(object): else: out_specs = to_list(specs) elif isinstance(specs, dict): - assert is_input == False - out_specs = [specs[n] \ - for n in extract_args(self.network.forward) if n != 'self'] + assert is_input is False + out_specs = [ + specs[n] for n in extract_args(self.network.forward) + if n != 'self' + ] else: out_specs = to_list(specs) # Note: checks each element has specificed `name`. diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index 61d1eb0e373341374199b811f198f7e295026ecc..d8e400b08bd474a1a59399e413a59e9a7d77f8b7 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -222,7 +222,7 @@ class Accuracy(Metric): transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) train_dataset = MNIST(mode='train', transform=transform) - model = paddle.Model(paddle.vision.LeNet(), input, label) + model = paddle.Model(paddle.vision.models.LeNet(), input, label) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) model.prepare( diff --git a/python/paddle/tests/test_callback_visualdl.py b/python/paddle/tests/test_callback_visualdl.py index 36316183104fe3a19bfa5e9868e26e54f5405dd1..db3b83f2b141417b363d5dbe2d4fedd2542d31ad 100644 --- a/python/paddle/tests/test_callback_visualdl.py +++ b/python/paddle/tests/test_callback_visualdl.py @@ -55,7 +55,7 @@ class TestCallbacks(unittest.TestCase): train_dataset = MnistDataset(mode='train', transform=transform) eval_dataset = MnistDataset(mode='test', transform=transform) - net = paddle.vision.LeNet() + net = 
paddle.vision.models.LeNet() model = paddle.Model(net, inputs, labels) optim = paddle.optimizer.Adam(0.001, parameters=net.parameters()) diff --git a/python/paddle/vision/__init__.py b/python/paddle/vision/__init__.py index aeb07bf281fb0a0289640e0591af4d864ca10b39..79fb7844dd58c664ce5c391788aacc384e49432c 100644 --- a/python/paddle/vision/__init__.py +++ b/python/paddle/vision/__init__.py @@ -11,22 +11,59 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import paddle +import paddle.nn as nn +from . import models # noqa: F401 +from . import transforms # noqa: F401 +from . import datasets # noqa: F401 +from . import ops # noqa: F401 +from .image import set_image_backend # noqa: F401 +from .image import get_image_backend # noqa: F401 +from .image import image_load # noqa: F401 +from .models import LeNet as models_LeNet +import paddle.utils.deprecated as deprecated -from . import models -from .models import * +__all__ = [ #noqa + 'set_image_backend', 'get_image_backend', 'image_load' +] -from . import transforms -from .transforms import * -from . import datasets -from .datasets import * +class LeNet(models_LeNet): + """LeNet model from + `"LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.`_ -from . import image -from .image import * + Args: + num_classes (int): output dim of last fc layer. If num_classes <=0, last fc layer + will not be defined. Default: 10. -from . import ops + Examples: + .. 
 code-block:: python -__all__ = models.__all__ \ - + transforms.__all__ \ - + datasets.__all__ \ - + image.__all__ + from paddle.vision.models import LeNet + + model = LeNet() + """ + + @deprecated( + since="2.0.0", + update_to="paddle.vision.models.LeNet", + level=1, + reason="Please use new API in models, paddle.vision.LeNet will be removed in future" + ) + def __init__(self, num_classes=10): + super(LeNet, self).__init__(num_classes=num_classes) + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2D( + 1, 6, 3, stride=1, padding=1), + nn.ReLU(), + nn.MaxPool2D(2, 2), + nn.Conv2D( + 6, 16, 5, stride=1, padding=0), + nn.ReLU(), + nn.MaxPool2D(2, 2)) + + if num_classes > 0: + self.fc = nn.Sequential( + nn.Linear(400, 120), + nn.Linear(120, 84), nn.Linear(84, num_classes)) diff --git a/python/paddle/vision/datasets/__init__.py b/python/paddle/vision/datasets/__init__.py index 6703aa4197603be2d82d930e3cd2622ff6b4cd77..3ee7503e27979753ba97241256f769841b40b0c8 100644 --- a/python/paddle/vision/datasets/__init__.py +++ b/python/paddle/vision/datasets/__init__.py @@ -12,20 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import folder -from . import mnist -from . import flowers -from . import cifar -from . 
 import voc2012 +from .folder import DatasetFolder # noqa: F401 +from .folder import ImageFolder # noqa: F401 +from .mnist import MNIST # noqa: F401 +from .mnist import FashionMNIST # noqa: F401 +from .flowers import Flowers # noqa: F401 +from .cifar import Cifar10 # noqa: F401 +from .cifar import Cifar100 # noqa: F401 +from .voc2012 import VOC2012 # noqa: F401 -from .folder import * -from .mnist import * -from .flowers import * -from .cifar import * -from .voc2012 import * - -__all__ = folder.__all__ \ - + mnist.__all__ \ - + flowers.__all__ \ - + cifar.__all__ \ - + voc2012.__all__ +__all__ = [ #noqa + 'DatasetFolder', + 'ImageFolder', + 'MNIST', + 'FashionMNIST', + 'Flowers', + 'Cifar10', + 'Cifar100', + 'VOC2012' +] diff --git a/python/paddle/vision/datasets/cifar.py b/python/paddle/vision/datasets/cifar.py index 0a0a48026af80eccc891df9202ab3a42f37ba06d..2a582d7d0a8e5c09830162b37c2b0aa0a493b13e 100644 --- a/python/paddle/vision/datasets/cifar.py +++ b/python/paddle/vision/datasets/cifar.py @@ -24,7 +24,7 @@ import paddle from paddle.io import Dataset from paddle.dataset.common import _check_exists_and_download -__all__ = ['Cifar10', 'Cifar100'] +__all__ = [] URL_PREFIX = 'https://dataset.bj.bcebos.com/cifar/' CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz' diff --git a/python/paddle/vision/datasets/flowers.py b/python/paddle/vision/datasets/flowers.py index 448d6efb52beca953de7981312e8f9131e6fb05d..11b781b7a6dc7bc131d7a7ddf230785f86fcf417 100644 --- a/python/paddle/vision/datasets/flowers.py +++ b/python/paddle/vision/datasets/flowers.py @@ -25,7 +25,7 @@ from paddle.io import Dataset from paddle.utils import try_import from paddle.dataset.common import _check_exists_and_download -__all__ = ["Flowers"] +__all__ = [] DATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz' LABEL_URL = 'http://paddlemodels.bj.bcebos.com/flowers/imagelabels.mat' diff --git a/python/paddle/vision/datasets/folder.py b/python/paddle/vision/datasets/folder.py index 
718af041307a15e5c44ece79b478d7a47bf8729c..220b3d8ecb4b412a83e452381ef021afdf0e4940 100644 --- a/python/paddle/vision/datasets/folder.py +++ b/python/paddle/vision/datasets/folder.py @@ -20,7 +20,7 @@ import paddle from paddle.io import Dataset from paddle.utils import try_import -__all__ = ["DatasetFolder", "ImageFolder"] +__all__ = [] def has_valid_extension(filename, extensions): diff --git a/python/paddle/vision/datasets/mnist.py b/python/paddle/vision/datasets/mnist.py index 1b998fd71a62e5bd21545e1548e628042fca833a..84760f9598b6adc60eb3873633db0bc87bf64785 100644 --- a/python/paddle/vision/datasets/mnist.py +++ b/python/paddle/vision/datasets/mnist.py @@ -24,7 +24,7 @@ import paddle from paddle.io import Dataset from paddle.dataset.common import _check_exists_and_download -__all__ = ["MNIST", "FashionMNIST"] +__all__ = [] class MNIST(Dataset): diff --git a/python/paddle/vision/datasets/voc2012.py b/python/paddle/vision/datasets/voc2012.py index 1a42d143f0f72b21b6f431400713500f395b03f9..5a82d7864cb009da066929c830f6213818b7c203 100644 --- a/python/paddle/vision/datasets/voc2012.py +++ b/python/paddle/vision/datasets/voc2012.py @@ -23,7 +23,7 @@ import paddle from paddle.io import Dataset from paddle.dataset.common import _check_exists_and_download -__all__ = ["VOC2012"] +__all__ = [] VOC_URL = 'https://dataset.bj.bcebos.com/voc/VOCtrainval_11-May-2012.tar' diff --git a/python/paddle/vision/image.py b/python/paddle/vision/image.py index 19986816b7cc42282050057b5cc791faa8fd1c1f..5c260b1d90a891134d344bb364d065bca2518c5b 100644 --- a/python/paddle/vision/image.py +++ b/python/paddle/vision/image.py @@ -15,7 +15,7 @@ from PIL import Image from paddle.utils import try_import -__all__ = ['set_image_backend', 'get_image_backend', 'image_load'] +__all__ = [] _image_backend = 'pil' diff --git a/python/paddle/vision/models/__init__.py b/python/paddle/vision/models/__init__.py index 60d8c246ae10e2bcb2a6576ce13a99e5e984c5bc..d38f3b1722ee8c2f31d53a26b96d3320abd2e350 100644 --- 
a/python/paddle/vision/models/__init__.py +++ b/python/paddle/vision/models/__init__.py @@ -12,20 +12,38 @@ #See the License for the specific language governing permissions and #limitations under the License. -from . import resnet -from . import vgg -from . import mobilenetv1 -from . import mobilenetv2 -from . import lenet +from .resnet import ResNet # noqa: F401 +from .resnet import resnet18 # noqa: F401 +from .resnet import resnet34 # noqa: F401 +from .resnet import resnet50 # noqa: F401 +from .resnet import resnet101 # noqa: F401 +from .resnet import resnet152 # noqa: F401 +from .mobilenetv1 import MobileNetV1 # noqa: F401 +from .mobilenetv1 import mobilenet_v1 # noqa: F401 +from .mobilenetv2 import MobileNetV2 # noqa: F401 +from .mobilenetv2 import mobilenet_v2 # noqa: F401 +from .vgg import VGG # noqa: F401 +from .vgg import vgg11 # noqa: F401 +from .vgg import vgg13 # noqa: F401 +from .vgg import vgg16 # noqa: F401 +from .vgg import vgg19 # noqa: F401 +from .lenet import LeNet # noqa: F401 -from .resnet import * -from .mobilenetv1 import * -from .mobilenetv2 import * -from .vgg import * -from .lenet import * - -__all__ = resnet.__all__ \ - + vgg.__all__ \ - + mobilenetv1.__all__ \ - + mobilenetv2.__all__ \ - + lenet.__all__ +__all__ = [ #noqa + 'ResNet', + 'resnet18', + 'resnet34', + 'resnet50', + 'resnet101', + 'resnet152', + 'VGG', + 'vgg11', + 'vgg13', + 'vgg16', + 'vgg19', + 'MobileNetV1', + 'mobilenet_v1', + 'MobileNetV2', + 'mobilenet_v2', + 'LeNet' +] diff --git a/python/paddle/vision/models/lenet.py b/python/paddle/vision/models/lenet.py index 2fb50fc17b9e9f1f9c8af3d5c22d8f0e35c3958a..46212f46f3a487c4ea567d049d7bc200331d34b4 100644 --- a/python/paddle/vision/models/lenet.py +++ b/python/paddle/vision/models/lenet.py @@ -15,7 +15,7 @@ import paddle import paddle.nn as nn -__all__ = ['LeNet'] +__all__ = [] class LeNet(nn.Layer): diff --git a/python/paddle/vision/models/mobilenetv1.py b/python/paddle/vision/models/mobilenetv1.py index 
22d177248e8b3708a37eb04b1b0eeeece8d154cf..671a2cd8dfd5f4cebf756edb397ad1f182b895ad 100644 --- a/python/paddle/vision/models/mobilenetv1.py +++ b/python/paddle/vision/models/mobilenetv1.py @@ -17,7 +17,7 @@ import paddle.nn as nn from paddle.utils.download import get_weights_path_from_url -__all__ = ['MobileNetV1', 'mobilenet_v1'] +__all__ = [] model_urls = { 'mobilenetv1_1.0': diff --git a/python/paddle/vision/models/mobilenetv2.py b/python/paddle/vision/models/mobilenetv2.py index f1cbaab1f90accc616f5a93bba8d3fd6126770fb..74071fc121688eafbf17833a6410b94d34191ec4 100644 --- a/python/paddle/vision/models/mobilenetv2.py +++ b/python/paddle/vision/models/mobilenetv2.py @@ -20,7 +20,7 @@ import paddle.nn.functional as F from paddle.utils.download import get_weights_path_from_url -__all__ = ['MobileNetV2', 'mobilenet_v2'] +__all__ = [] model_urls = { 'mobilenetv2_1.0': diff --git a/python/paddle/vision/models/resnet.py b/python/paddle/vision/models/resnet.py index 1f44e0bc6dfeb18cd1eb99489860500a390c33de..5be69c93e8b5f05f17f7d8c4503a794682a12d15 100644 --- a/python/paddle/vision/models/resnet.py +++ b/python/paddle/vision/models/resnet.py @@ -20,9 +20,7 @@ import paddle.nn as nn from paddle.utils.download import get_weights_path_from_url -__all__ = [ - 'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152' -] +__all__ = [] model_urls = { 'resnet18': ('https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams', diff --git a/python/paddle/vision/models/vgg.py b/python/paddle/vision/models/vgg.py index f6b4c75e84f01379264fb2066b218747204fd6da..d526de8208329fb23ff4fad219db5dd706958ad8 100644 --- a/python/paddle/vision/models/vgg.py +++ b/python/paddle/vision/models/vgg.py @@ -17,13 +17,7 @@ import paddle.nn as nn from paddle.utils.download import get_weights_path_from_url -__all__ = [ - 'VGG', - 'vgg11', - 'vgg13', - 'vgg16', - 'vgg19', -] +__all__ = [] model_urls = { 'vgg16': ('https://paddle-hapi.bj.bcebos.com/models/vgg16.pdparams', diff --git 
a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py index 60a7a90c9be89591e681192f5e886f9c5443a8c0..53beedb885a7112bc9d3f3fc33bc1c48928e500d 100644 --- a/python/paddle/vision/ops.py +++ b/python/paddle/vision/ops.py @@ -22,8 +22,12 @@ from ..fluid.initializer import Normal from paddle.common_ops_import import * -__all__ = [ - 'yolo_loss', 'yolo_box', 'deform_conv2d', 'DeformConv2D', 'read_file', +__all__ = [ #noqa + 'yolo_loss', + 'yolo_box', + 'deform_conv2d', + 'DeformConv2D', + 'read_file', 'decode_jpeg' ] diff --git a/python/paddle/vision/transforms/__init__.py b/python/paddle/vision/transforms/__init__.py index f7c5b63b19ed081ee6887850c1aa3ef918715222..413f09f78699ee995f490e86a94006cd1a48c6a0 100644 --- a/python/paddle/vision/transforms/__init__.py +++ b/python/paddle/vision/transforms/__init__.py @@ -12,11 +12,70 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import transforms -from . import functional +from .transforms import BaseTransform # noqa: F401 +from .transforms import Compose # noqa: F401 +from .transforms import Resize # noqa: F401 +from .transforms import RandomResizedCrop # noqa: F401 +from .transforms import CenterCrop # noqa: F401 +from .transforms import RandomHorizontalFlip # noqa: F401 +from .transforms import RandomVerticalFlip # noqa: F401 +from .transforms import Transpose # noqa: F401 +from .transforms import Normalize # noqa: F401 +from .transforms import BrightnessTransform # noqa: F401 +from .transforms import SaturationTransform # noqa: F401 +from .transforms import ContrastTransform # noqa: F401 +from .transforms import HueTransform # noqa: F401 +from .transforms import ColorJitter # noqa: F401 +from .transforms import RandomCrop # noqa: F401 +from .transforms import Pad # noqa: F401 +from .transforms import RandomRotation # noqa: F401 +from .transforms import Grayscale # noqa: F401 +from .transforms import ToTensor # noqa: F401 +from .functional import 
to_tensor # noqa: F401 +from .functional import hflip # noqa: F401 +from .functional import vflip # noqa: F401 +from .functional import resize # noqa: F401 +from .functional import pad # noqa: F401 +from .functional import rotate # noqa: F401 +from .functional import to_grayscale # noqa: F401 +from .functional import crop # noqa: F401 +from .functional import center_crop # noqa: F401 +from .functional import adjust_brightness # noqa: F401 +from .functional import adjust_contrast # noqa: F401 +from .functional import adjust_hue # noqa: F401 +from .functional import normalize # noqa: F401 -from .transforms import * -from .functional import * - -__all__ = transforms.__all__ \ - + functional.__all__ +__all__ = [ #noqa + 'BaseTransform', + 'Compose', + 'Resize', + 'RandomResizedCrop', + 'CenterCrop', + 'RandomHorizontalFlip', + 'RandomVerticalFlip', + 'Transpose', + 'Normalize', + 'BrightnessTransform', + 'SaturationTransform', + 'ContrastTransform', + 'HueTransform', + 'ColorJitter', + 'RandomCrop', + 'Pad', + 'RandomRotation', + 'Grayscale', + 'ToTensor', + 'to_tensor', + 'hflip', + 'vflip', + 'resize', + 'pad', + 'rotate', + 'to_grayscale', + 'crop', + 'center_crop', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'normalize' +] diff --git a/python/paddle/vision/transforms/functional.py b/python/paddle/vision/transforms/functional.py index 18a35915c99da505678a2ab836d21dd0ace56ee6..3087d5c3ed57702e9bd4d8de7a9a2273876101c7 100644 --- a/python/paddle/vision/transforms/functional.py +++ b/python/paddle/vision/transforms/functional.py @@ -29,11 +29,7 @@ from . import functional_pil as F_pil from . import functional_cv2 as F_cv2 from . 
import functional_tensor as F_t -__all__ = [ - 'to_tensor', 'hflip', 'vflip', 'resize', 'pad', 'rotate', 'to_grayscale', - 'crop', 'center_crop', 'adjust_brightness', 'adjust_contrast', 'adjust_hue', - 'normalize' -] +__all__ = [] def _is_pil_image(img): diff --git a/python/paddle/vision/transforms/functional_cv2.py b/python/paddle/vision/transforms/functional_cv2.py index 99cbfd6dc4f8dd195960b776864bc523bdca2c71..487d79d2765347759660d7d217d608f393ff90e4 100644 --- a/python/paddle/vision/transforms/functional_cv2.py +++ b/python/paddle/vision/transforms/functional_cv2.py @@ -33,6 +33,8 @@ else: Sequence = collections.abc.Sequence Iterable = collections.abc.Iterable +__all__ = [] + def to_tensor(pic, data_format='CHW'): """Converts a ``numpy.ndarray`` to paddle.Tensor. @@ -49,7 +51,7 @@ def to_tensor(pic, data_format='CHW'): """ - if not data_format in ['CHW', 'HWC']: + if data_format not in ['CHW', 'HWC']: raise ValueError('data_format should be CHW or HWC. Got {}'.format( data_format)) diff --git a/python/paddle/vision/transforms/functional_pil.py b/python/paddle/vision/transforms/functional_pil.py index eee60c5452b2de1235c577b2eabb8de1cfdc1467..ae6d0cc45a92ae543dc6308bc6ffa2680e0fcc4b 100644 --- a/python/paddle/vision/transforms/functional_pil.py +++ b/python/paddle/vision/transforms/functional_pil.py @@ -41,6 +41,8 @@ _pil_interp_from_str = { 'hamming': Image.HAMMING } +__all__ = [] + def to_tensor(pic, data_format='CHW'): """Converts a ``PIL.Image`` to paddle.Tensor. @@ -57,7 +59,7 @@ def to_tensor(pic, data_format='CHW'): """ - if not data_format in ['CHW', 'HWC']: + if data_format not in ['CHW', 'HWC']: raise ValueError('data_format should be CHW or HWC. 
Got {}'.format( data_format)) diff --git a/python/paddle/vision/transforms/functional_tensor.py b/python/paddle/vision/transforms/functional_tensor.py index 7f490d57916fbcb67475cd433b09771d13261128..1ec67416998a3d03e391922ad078b827812661bf 100644 --- a/python/paddle/vision/transforms/functional_tensor.py +++ b/python/paddle/vision/transforms/functional_tensor.py @@ -23,6 +23,8 @@ import paddle.nn.functional as F import sys import collections +__all__ = [] + def _assert_image_tensor(img, data_format): if not isinstance( diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py index 00e12689c4d9fe41e67798309fee42ce63d0f7a5..8a35e6c3b908ea9fa782c428acc6797c6fb0986c 100644 --- a/python/paddle/vision/transforms/transforms.py +++ b/python/paddle/vision/transforms/transforms.py @@ -35,13 +35,7 @@ else: Sequence = collections.abc.Sequence Iterable = collections.abc.Iterable -__all__ = [ - "BaseTransform", "Compose", "Resize", "RandomResizedCrop", "CenterCrop", - "RandomHorizontalFlip", "RandomVerticalFlip", "Transpose", "Normalize", - "BrightnessTransform", "SaturationTransform", "ContrastTransform", - "HueTransform", "ColorJitter", "RandomCrop", "Pad", "RandomRotation", - "Grayscale", "ToTensor" -] +__all__ = [] def _get_image_size(img):