diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py index 5f327497047470dcafa93b7b07a51c63721b75ac..d4af3e2f8042a5dd83e5d4ba06cb3c89b352f8ed 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py @@ -527,7 +527,7 @@ def create_heter_program(program, config, heter_program, heter_ops, # This function mainly includes the following contents: # 1. For every heter block: # a) copy heter device op from origin program - # b) create variables which belong to heter op: + # b) create variables which belong to heter op: # -> if variable is persistable, clone it in global_scope # -> if variable is temp, create it in heter block # c) create communicate related op as follow: diff --git a/python/paddle/fluid/tests/unittests/hccl_tools.py b/python/paddle/fluid/tests/unittests/hccl_tools.py index 3ae8f38dc64bd1da6ab5a46ddd60b239b5461ad9..e3628ee5a4e9b4bf51950fccd424e17a69883eec 100644 --- a/python/paddle/fluid/tests/unittests/hccl_tools.py +++ b/python/paddle/fluid/tests/unittests/hccl_tools.py @@ -58,7 +58,7 @@ def parse_args(): default="[0,8)", help="The number of the Ascend accelerators used. please note that the Ascend accelerators" "used must be continuous, such [0,4) means to use four chips " - "0,1,2,3; [0,1) means to use chip 0; The first four chips are" + "0,1,2,3; [0,1) means to use chip 0; The first four chips are" "a group, and the last four chips are a group. In addition to" "the [0,8) chips are allowed, other cross-group such as [3,6)" "are prohibited.") diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 836d4008f7d0b50025f8816b003caa16c59792e3..d2f0063af0d220b7ae1f0945369bddb138860c06 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -15,148 +15,273 @@ # TODO: import all neural network related api under this directory, # including layers, linear, conv, rnn etc. -from .layer import norm -from .functional import extension -from .layer import common -from .layer import rnn -from .utils import weight_norm_hook - -from . 
import initializer - -__all__ = [] -__all__ += norm.__all__ -__all__ += extension.__all__ -__all__ += common.__all__ -__all__ += rnn.__all__ -__all__ += weight_norm_hook.__all__ - -# TODO: define alias in nn directory -from .clip import ClipGradByGlobalNorm #DEFINE_ALIAS -from .clip import ClipGradByNorm #DEFINE_ALIAS -from .clip import ClipGradByValue #DEFINE_ALIAS -# from .control_flow import cond #DEFINE_ALIAS -# from .control_flow import DynamicRNN #DEFINE_ALIAS -# from .control_flow import StaticRNN #DEFINE_ALIAS -# from .control_flow import while_loop #DEFINE_ALIAS -# from .control_flow import rnn #DEFINE_ALIAS -from .decode import BeamSearchDecoder #DEFINE_ALIAS -from .decode import dynamic_decode #DEFINE_ALIAS -# from .decode import Decoder #DEFINE_ALIAS -# from .decode import crf_decoding #DEFINE_ALIAS -# from .decode import ctc_greedy_decoder #DEFINE_ALIAS -# from .input import Input #DEFINE_ALIAS -from .layer.activation import ELU #DEFINE_ALIAS -from .layer.activation import GELU #DEFINE_ALIAS -from .layer.activation import Tanh #DEFINE_ALIAS -from .layer.activation import Hardshrink #DEFINE_ALIAS -from .layer.activation import Hardswish #DEFINE_ALIAS -from .layer.activation import Hardtanh #DEFINE_ALIAS -from .layer.activation import PReLU #DEFINE_ALIAS -from .layer.activation import ReLU #DEFINE_ALIAS -from .layer.activation import ReLU6 #DEFINE_ALIAS -from .layer.activation import SELU #DEFINE_ALIAS -from .layer.activation import Silu #DEFINE_ALIAS -from .layer.activation import LeakyReLU #DEFINE_ALIAS -from .layer.activation import Sigmoid #DEFINE_ALIAS -from .layer.activation import Hardsigmoid #DEFINE_ALIAS -from .layer.activation import LogSigmoid #DEFINE_ALIAS -from .layer.activation import Softmax #DEFINE_ALIAS -from .layer.activation import Softplus #DEFINE_ALIAS -from .layer.activation import Softshrink #DEFINE_ALIAS -from .layer.activation import Softsign #DEFINE_ALIAS -from .layer.activation import Swish #DEFINE_ALIAS -from .layer.activation import Tanhshrink #DEFINE_ALIAS -from .layer.activation import ThresholdedReLU #DEFINE_ALIAS -from .layer.activation import LogSoftmax #DEFINE_ALIAS -from .layer.activation import Maxout #DEFINE_ALIAS -from .layer.common import Pad1D #DEFINE_ALIAS -from .layer.common import Pad2D #DEFINE_ALIAS -from .layer.common import Pad3D #DEFINE_ALIAS -from .layer.common import CosineSimilarity #DEFINE_ALIAS -from .layer.common import Embedding #DEFINE_ALIAS -from .layer.common import Linear #DEFINE_ALIAS -from .layer.common import Flatten #DEFINE_ALIAS -from .layer.common import Upsample #DEFINE_ALIAS -from .layer.common import UpsamplingNearest2D #DEFINE_ALIAS -from .layer.common import UpsamplingBilinear2D #DEFINE_ALIAS -from .layer.common import Bilinear #DEFINE_ALIAS -from .layer.common import Dropout #DEFINE_ALIAS -from .layer.common import Dropout2D #DEFINE_ALIAS -from .layer.common import Dropout3D #DEFINE_ALIAS -from .layer.common import AlphaDropout #DEFINE_ALIAS -from .layer.common import Unfold #DEFINE_ALIAS - -from .layer.pooling import AvgPool1D #DEFINE_ALIAS -from .layer.pooling import AvgPool2D #DEFINE_ALIAS -from .layer.pooling import AvgPool3D #DEFINE_ALIAS -from .layer.pooling import MaxPool1D #DEFINE_ALIAS -from .layer.pooling import MaxPool2D #DEFINE_ALIAS -from .layer.pooling import MaxPool3D #DEFINE_ALIAS -from .layer.pooling import AdaptiveAvgPool1D #DEFINE_ALIAS -from .layer.pooling import AdaptiveAvgPool2D #DEFINE_ALIAS -from .layer.pooling import AdaptiveAvgPool3D #DEFINE_ALIAS - -from .layer.pooling import 
AdaptiveMaxPool1D #DEFINE_ALIAS -from .layer.pooling import AdaptiveMaxPool2D #DEFINE_ALIAS -from .layer.pooling import AdaptiveMaxPool3D #DEFINE_ALIAS -from .layer.conv import Conv1D #DEFINE_ALIAS -from .layer.conv import Conv2D #DEFINE_ALIAS -from .layer.conv import Conv3D #DEFINE_ALIAS -from .layer.conv import Conv1DTranspose #DEFINE_ALIAS -from .layer.conv import Conv2DTranspose #DEFINE_ALIAS -from .layer.conv import Conv3DTranspose #DEFINE_ALIAS -# from .layer.conv import TreeConv #DEFINE_ALIAS -# from .layer.conv import Conv1D #DEFINE_ALIAS -from .layer.common import Linear -# from .layer.loss import NCELoss #DEFINE_ALIAS -from .layer.loss import BCEWithLogitsLoss #DEFINE_ALIAS -from .layer.loss import CrossEntropyLoss #DEFINE_ALIAS -from .layer.loss import HSigmoidLoss #DEFINE_ALIAS -from .layer.loss import MSELoss #DEFINE_ALIAS -from .layer.loss import L1Loss #DEFINE_ALIAS -from .layer.loss import NLLLoss #DEFINE_ALIAS -from .layer.loss import BCELoss #DEFINE_ALIAS -from .layer.loss import KLDivLoss #DEFINE_ALIAS -from .layer.loss import MarginRankingLoss #DEFINE_ALIAS -from .layer.loss import CTCLoss #DEFINE_ALIAS -from .layer.loss import SmoothL1Loss #DEFINE_ALIAS -from .layer.norm import BatchNorm #DEFINE_ALIAS -from .layer.norm import SyncBatchNorm #DEFINE_ALIAS -from .layer.norm import GroupNorm #DEFINE_ALIAS -from .layer.norm import LayerNorm #DEFINE_ALIAS -from .layer.norm import SpectralNorm #DEFINE_ALIAS -from .layer.norm import InstanceNorm1D #DEFINE_ALIAS -from .layer.norm import InstanceNorm2D #DEFINE_ALIAS -from .layer.norm import InstanceNorm3D #DEFINE_ALIAS -from .layer.norm import BatchNorm1D #DEFINE_ALIAS -from .layer.norm import BatchNorm2D #DEFINE_ALIAS -from .layer.norm import BatchNorm3D #DEFINE_ALIAS -from .layer.norm import LocalResponseNorm #DEFINE_ALIAS - -from .layer.rnn import RNNCellBase #DEFINE_ALIAS -from .layer.rnn import SimpleRNNCell #DEFINE_ALIAS -from .layer.rnn import LSTMCell #DEFINE_ALIAS -from .layer.rnn import GRUCell #DEFINE_ALIAS -from .layer.rnn import RNN #DEFINE_ALIAS -from .layer.rnn import BiRNN #DEFINE_ALIAS -from .layer.rnn import SimpleRNN #DEFINE_ALIAS -from .layer.rnn import LSTM #DEFINE_ALIAS -from .layer.rnn import GRU #DEFINE_ALIAS - -from .layer.transformer import MultiHeadAttention -from .layer.transformer import TransformerEncoderLayer -from .layer.transformer import TransformerEncoder -from .layer.transformer import TransformerDecoderLayer -from .layer.transformer import TransformerDecoder -from .layer.transformer import Transformer -from .layer.distance import PairwiseDistance #DEFINE_ALIAS - -from .layer.vision import PixelShuffle - -from .layer.container import LayerDict #DEFINE_ALIAS - -from .layer import loss #DEFINE_ALIAS -from .layer import conv #DEFINE_ALIAS -from .layer import vision #DEFINE_ALIAS -from ..fluid.dygraph.layers import Layer #DEFINE_ALIAS -from ..fluid.dygraph.container import LayerList, ParameterList, Sequential #DEFINE_ALIAS +from .clip import ClipGradByGlobalNorm # noqa: F401 +from .clip import ClipGradByNorm # noqa: F401 +from .clip import ClipGradByValue # noqa: F401 +from .decode import BeamSearchDecoder # noqa: F401 +from .decode import dynamic_decode # noqa: F401 +from .layer.activation import ELU # noqa: F401 +from .layer.activation import GELU # noqa: F401 +from .layer.activation import Tanh # noqa: F401 +from .layer.activation import Hardshrink # noqa: F401 +from .layer.activation import Hardswish # noqa: F401 +from .layer.activation import Hardtanh # noqa: F401 +from .layer.activation 
import PReLU # noqa: F401 +from .layer.activation import ReLU # noqa: F401 +from .layer.activation import ReLU6 # noqa: F401 +from .layer.activation import SELU # noqa: F401 +from .layer.activation import Silu # noqa: F401 +from .layer.activation import LeakyReLU # noqa: F401 +from .layer.activation import Sigmoid # noqa: F401 +from .layer.activation import Hardsigmoid # noqa: F401 +from .layer.activation import LogSigmoid # noqa: F401 +from .layer.activation import Softmax # noqa: F401 +from .layer.activation import Softplus # noqa: F401 +from .layer.activation import Softshrink # noqa: F401 +from .layer.activation import Softsign # noqa: F401 +from .layer.activation import Swish # noqa: F401 +from .layer.activation import Tanhshrink # noqa: F401 +from .layer.activation import ThresholdedReLU # noqa: F401 +from .layer.activation import LogSoftmax # noqa: F401 +from .layer.activation import Maxout # noqa: F401 +from .layer.common import Pad1D # noqa: F401 +from .layer.common import Pad2D # noqa: F401 +from .layer.common import Pad3D # noqa: F401 +from .layer.common import CosineSimilarity # noqa: F401 +from .layer.common import Embedding # noqa: F401 +from .layer.common import Linear # noqa: F401 +from .layer.common import Flatten # noqa: F401 +from .layer.common import Upsample # noqa: F401 +from .layer.common import UpsamplingNearest2D # noqa: F401 +from .layer.common import UpsamplingBilinear2D # noqa: F401 +from .layer.common import Bilinear # noqa: F401 +from .layer.common import Dropout # noqa: F401 +from .layer.common import Dropout2D # noqa: F401 +from .layer.common import Dropout3D # noqa: F401 +from .layer.common import AlphaDropout # noqa: F401 +from .layer.common import Unfold # noqa: F401 + +from .layer.pooling import AvgPool1D # noqa: F401 +from .layer.pooling import AvgPool2D # noqa: F401 +from .layer.pooling import AvgPool3D # noqa: F401 +from .layer.pooling import MaxPool1D # noqa: F401 +from .layer.pooling import MaxPool2D # noqa: F401 +from .layer.pooling import MaxPool3D # noqa: F401 +from .layer.pooling import AdaptiveAvgPool1D # noqa: F401 +from .layer.pooling import AdaptiveAvgPool2D # noqa: F401 +from .layer.pooling import AdaptiveAvgPool3D # noqa: F401 +from .layer.pooling import AdaptiveMaxPool1D # noqa: F401 +from .layer.pooling import AdaptiveMaxPool2D # noqa: F401 +from .layer.pooling import AdaptiveMaxPool3D # noqa: F401 + +from .layer.conv import Conv1D # noqa: F401 +from .layer.conv import Conv2D # noqa: F401 +from .layer.conv import Conv3D # noqa: F401 +from .layer.conv import Conv1DTranspose # noqa: F401 +from .layer.conv import Conv2DTranspose # noqa: F401 +from .layer.conv import Conv3DTranspose # noqa: F401 + +from .layer.loss import BCEWithLogitsLoss # noqa: F401 +from .layer.loss import CrossEntropyLoss # noqa: F401 +from .layer.loss import HSigmoidLoss # noqa: F401 +from .layer.loss import MSELoss # noqa: F401 +from .layer.loss import L1Loss # noqa: F401 +from .layer.loss import NLLLoss # noqa: F401 +from .layer.loss import BCELoss # noqa: F401 +from .layer.loss import KLDivLoss # noqa: F401 +from .layer.loss import MarginRankingLoss # noqa: F401 +from .layer.loss import CTCLoss # noqa: F401 +from .layer.loss import SmoothL1Loss # noqa: F401 +from .layer.norm import BatchNorm # noqa: F401 +from .layer.norm import SyncBatchNorm # noqa: F401 +from .layer.norm import GroupNorm # noqa: F401 +from .layer.norm import LayerNorm # noqa: F401 +from .layer.norm import SpectralNorm # noqa: F401 +from .layer.norm import InstanceNorm1D # noqa: F401 +from 
.layer.norm import InstanceNorm2D # noqa: F401
+from .layer.norm import InstanceNorm3D # noqa: F401
+from .layer.norm import BatchNorm1D # noqa: F401
+from .layer.norm import BatchNorm2D # noqa: F401
+from .layer.norm import BatchNorm3D # noqa: F401
+from .layer.norm import LocalResponseNorm # noqa: F401
+
+from .layer.rnn import RNNCellBase # noqa: F401
+from .layer.rnn import SimpleRNNCell # noqa: F401
+from .layer.rnn import LSTMCell # noqa: F401
+from .layer.rnn import GRUCell # noqa: F401
+from .layer.rnn import RNN # noqa: F401
+from .layer.rnn import BiRNN # noqa: F401
+from .layer.rnn import SimpleRNN # noqa: F401
+from .layer.rnn import LSTM # noqa: F401
+from .layer.rnn import GRU # noqa: F401
+
+from .layer.transformer import MultiHeadAttention # noqa: F401
+from .layer.transformer import TransformerEncoderLayer # noqa: F401
+from .layer.transformer import TransformerEncoder # noqa: F401
+from .layer.transformer import TransformerDecoderLayer # noqa: F401
+from .layer.transformer import TransformerDecoder # noqa: F401
+from .layer.transformer import Transformer # noqa: F401
+from .layer.distance import PairwiseDistance # noqa: F401
+
+from .layer.vision import PixelShuffle # noqa: F401
+from .layer.container import LayerDict # noqa: F401
+
+# TODO: remove loss; keep it for now because it is still widely used in unit tests
+from .layer import loss # noqa: F401
+from ..fluid.dygraph.layers import Layer # noqa: F401
+from ..fluid.dygraph.container import LayerList # noqa: F401
+from ..fluid.dygraph.container import ParameterList # noqa: F401
+from ..fluid.dygraph.container import Sequential # noqa: F401
+
+from . import utils # noqa: F401
+from . import functional # noqa: F401
+from . import initializer # noqa: F401
+
+# TODO: remove 'diag_embed', 'remove_weight_norm' and 'weight_norm' in a few months.
+import paddle.utils.deprecated as deprecated
+
+
+@deprecated(
+    since="2.0.0",
+    update_to="paddle.nn.functional.diag_embed",
+    reason="diag_embed in paddle.nn will be removed in a future release")
+def diag_embed(*args):
+    '''
+    Alias of paddle.nn.functional.diag_embed.
+    '''
+    return functional.diag_embed(*args)
+
+
+@deprecated(
+    since="2.0.0",
+    update_to="paddle.nn.utils.remove_weight_norm",
+    reason="remove_weight_norm in paddle.nn will be removed in a future release")
+def remove_weight_norm(*args):
+    '''
+    Alias of paddle.nn.utils.remove_weight_norm.
+    '''
+    return utils.remove_weight_norm(*args)
+
+
+@deprecated(
+    since="2.0.0",
+    update_to="paddle.nn.utils.weight_norm",
+    reason="weight_norm in paddle.nn will be removed in a future release")
+def weight_norm(*args):
+    '''
+    Alias of paddle.nn.utils.weight_norm.
+    '''
+    return utils.weight_norm(*args)
+
+
+__all__ = [ #noqa
+    'BatchNorm',
+    'GroupNorm',
+    'LayerNorm',
+    'SpectralNorm',
+    'BatchNorm1D',
+    'BatchNorm2D',
+    'BatchNorm3D',
+    'InstanceNorm1D',
+    'InstanceNorm2D',
+    'InstanceNorm3D',
+    'SyncBatchNorm',
+    'LocalResponseNorm',
+    'Embedding',
+    'Linear',
+    'Upsample',
+    'UpsamplingNearest2D',
+    'UpsamplingBilinear2D',
+    'Pad1D',
+    'Pad2D',
+    'Pad3D',
+    'CosineSimilarity',
+    'Dropout',
+    'Dropout2D',
+    'Dropout3D',
+    'Bilinear',
+    'AlphaDropout',
+    'Unfold',
+    'RNNCellBase',
+    'SimpleRNNCell',
+    'LSTMCell',
+    'GRUCell',
+    'RNN',
+    'BiRNN',
+    'SimpleRNN',
+    'LSTM',
+    'GRU',
+    'dynamic_decode',
+    'MultiHeadAttention',
+    'Maxout',
+    'Softsign',
+    'Transformer',
+    'MSELoss',
+    'LogSigmoid',
+    'BeamSearchDecoder',
+    'ClipGradByNorm',
+    'ReLU',
+    'PairwiseDistance',
+    'BCEWithLogitsLoss',
+    'SmoothL1Loss',
+    'MaxPool3D',
+    'AdaptiveMaxPool2D',
+    'Hardshrink',
+    'clip',
+    'Softplus',
+    'KLDivLoss',
+    'clip_by_norm',
+    'AvgPool2D',
+    'L1Loss',
+    'LeakyReLU',
+    'AvgPool1D',
+    'AdaptiveAvgPool3D',
+    'AdaptiveMaxPool3D',
+    'NLLLoss',
+    'Conv1D',
+    'Sequential',
+    'Hardswish',
+    'Conv1DTranspose',
+    'AdaptiveMaxPool1D',
+    'TransformerEncoder',
+    'Softmax',
+    'ParameterList',
+    'Conv2D',
+    'Softshrink',
+    'Hardtanh',
+    'TransformerDecoderLayer',
+    'CrossEntropyLoss',
+    'GELU',
+    'SELU',
+    'Silu',
+    'Conv2DTranspose',
+    'CTCLoss',
+    'ThresholdedReLU',
+    'AdaptiveAvgPool2D',
+    'MaxPool1D',
+    'Layer',
+    'TransformerDecoder',
+    'Conv3D',
+    'Tanh',
+    'Conv3DTranspose',
+    'Flatten',
+    'AdaptiveAvgPool1D',
+    'Tanhshrink',
+    'HSigmoidLoss',
+    'PReLU',
+    'TransformerEncoderLayer',
+    'AvgPool3D',
+    'MaxPool2D',
+    'MarginRankingLoss',
+    'LayerList',
+    'ClipGradByValue',
+    'BCELoss',
+    'Hardsigmoid',
+    'ClipGradByGlobalNorm',
+    'LogSoftmax',
+    'Sigmoid',
+    'Swish',
+    'PixelShuffle',
+    'ELU',
+    'ReLU6'
+]
diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py
index 9180a883e835c367530ba449a7a2dccf158dedf5..70c49b4a53876db5705d63c3df501fbd374f0a29 100644
--- a/python/paddle/nn/clip.py
+++ b/python/paddle/nn/clip.py
@@ -13,8 +13,6 @@
 # limitations under the License.
# TODO: define the functions to clip gradient of parameter -from ..fluid.clip import ClipGradByGlobalNorm #DEFINE_ALIAS -from ..fluid.clip import ClipGradByNorm #DEFINE_ALIAS -from ..fluid.clip import ClipGradByValue #DEFINE_ALIAS - -__all__ = ['ClipGradByGlobalNorm', 'ClipGradByNorm', 'ClipGradByValue'] +from ..fluid.clip import ClipGradByGlobalNorm # noqa: F401 +from ..fluid.clip import ClipGradByNorm # noqa: F401 +from ..fluid.clip import ClipGradByValue # noqa: F401 diff --git a/python/paddle/nn/decode.py b/python/paddle/nn/decode.py index bba5aba0da9ad024823750c32f2e02bb22dfbbbb..3229f0b21a6699d1b7437955af31004538a791c9 100644 --- a/python/paddle/nn/decode.py +++ b/python/paddle/nn/decode.py @@ -12,10 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ..fluid.layers import BeamSearchDecoder #DEFINE_ALIAS -from ..fluid.layers import dynamic_decode #DEFINE_ALIAS - -__all__ = [ - 'BeamSearchDecoder', - 'dynamic_decode', -] +from ..fluid.layers import BeamSearchDecoder # noqa: F401 +from ..fluid.layers import dynamic_decode # noqa: F401 diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index 98124be7288d008f573047d99c38a8b9b1236b2e..d4c17a27a61780b431916b2634585de035778ce8 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -14,211 +14,185 @@ # TODO: import all neural network related api under this directory, # including layers, linear, conv, rnn etc. -__all__ = [] -# TODO: define alias in functional directory -from . import conv -__all__ += conv.__all__ -from . import activation -__all__ += activation.__all__ -from . import extension -__all__ += extension.__all__ -from . import common -__all__ += common.__all__ -from . import pooling -__all__ += pooling.__all__ -from . 
import loss -__all__ += loss.__all__ -from .activation import elu #DEFINE_ALIAS -from .activation import elu_ #DEFINE_ALIAS -# from .activation import erf #DEFINE_ALIAS -from .activation import gelu #DEFINE_ALIAS -from .activation import hardshrink #DEFINE_ALIAS -from .activation import hardtanh #DEFINE_ALIAS -from .activation import hardsigmoid #DEFINE_ALIAS -from .activation import hardswish #DEFINE_ALIAS -from .activation import leaky_relu #DEFINE_ALIAS -from .activation import log_sigmoid #DEFINE_ALIAS -from .activation import maxout #DEFINE_ALIAS -from .activation import prelu #DEFINE_ALIAS -from .activation import relu #DEFINE_ALIAS -from .activation import relu_ #DEFINE_ALIAS -from .activation import relu6 #DEFINE_ALIAS -from .activation import selu #DEFINE_ALIAS -from .activation import sigmoid #DEFINE_ALIAS -from .activation import silu #DEFINE_ALIAS -# from .activation import soft_relu #DEFINE_ALIAS -from .activation import softmax #DEFINE_ALIAS -from .activation import softmax_ #DEFINE_ALIAS -from .activation import softplus #DEFINE_ALIAS -from .activation import softshrink #DEFINE_ALIAS -from .activation import softsign #DEFINE_ALIAS -from .activation import swish #DEFINE_ALIAS -from .activation import tanh #DEFINE_ALIAS -from .activation import tanh_ #DEFINE_ALIAS -from .activation import tanhshrink #DEFINE_ALIAS -from .activation import thresholded_relu #DEFINE_ALIAS -from .activation import log_softmax #DEFINE_ALIAS -from .activation import glu #DEFINE_ALIAS -from .common import dropout #DEFINE_ALIAS -from .common import dropout2d #DEFINE_ALIAS -from .common import dropout3d #DEFINE_ALIAS -from .common import alpha_dropout #DEFINE_ALIAS -# from .common import embedding #DEFINE_ALIAS -# from .common import fc #DEFINE_ALIAS -from .common import label_smooth -# from .common import one_hot #DEFINE_ALIAS -from .common import pad #DEFINE_ALIAS -# from .common import pad_constant_like #DEFINE_ALIAS -# from .common import pad2d #DEFINE_ALIAS -from .common import cosine_similarity #DEFINE_ALIAS -from .common import unfold #DEFINE_ALIAS -# from .common import bilinear_tensor_product #DEFINE_ALIAS -from .common import interpolate #DEFINE_ALIAS -from .common import upsample #DEFINE_ALIAS -from .common import bilinear #DEFINE_ALIAS -from .conv import conv1d #DEFINE_ALIAS -from .conv import conv1d_transpose #DEFINE_ALIAS -from .common import linear #DEFINE_ALIAS -from .conv import conv2d #DEFINE_ALIAS -from .conv import conv2d_transpose #DEFINE_ALIAS -from .conv import conv3d #DEFINE_ALIAS -from .conv import conv3d_transpose #DEFINE_ALIAS -# from .extension import add_position_encoding #DEFINE_ALIAS -# from .extension import autoincreased_step_counter #DEFINE_ALIAS -# from .extension import continuous_value_model #DEFINE_ALIAS -# from .extension import filter_by_instag #DEFINE_ALIAS -# from .extension import linear_chain_crf #DEFINE_ALIAS -# from .extension import merge_selected_rows #DEFINE_ALIAS -# from .extension import multiclass_nms #DEFINE_ALIAS -# from .extension import polygon_box_transform #DEFINE_ALIAS -# from .extension import random_crop #DEFINE_ALIAS -# from .extension import rpn_target_assign #DEFINE_ALIAS -# from .extension import similarity_focus #DEFINE_ALIAS -# from .extension import target_assign #DEFINE_ALIAS -# from .extension import temporal_shift #DEFINE_ALIAS -# from .extension import warpctc #DEFINE_ALIAS -from .extension import diag_embed #DEFINE_ALIAS +from .activation import elu # noqa: F401 +from .activation import elu_ # noqa: F401 +from .activation import 
gelu # noqa: F401 +from .activation import hardshrink # noqa: F401 +from .activation import hardtanh # noqa: F401 +from .activation import hardsigmoid # noqa: F401 +from .activation import hardswish # noqa: F401 +from .activation import leaky_relu # noqa: F401 +from .activation import log_sigmoid # noqa: F401 +from .activation import maxout # noqa: F401 +from .activation import prelu # noqa: F401 +from .activation import relu # noqa: F401 +from .activation import relu_ # noqa: F401 +from .activation import relu6 # noqa: F401 +from .activation import selu # noqa: F401 +from .activation import sigmoid # noqa: F401 +from .activation import silu # noqa: F401 +from .activation import softmax # noqa: F401 +from .activation import softmax_ # noqa: F401 +from .activation import softplus # noqa: F401 +from .activation import softshrink # noqa: F401 +from .activation import softsign # noqa: F401 +from .activation import swish # noqa: F401 +from .activation import tanh # noqa: F401 +from .activation import tanh_ # noqa: F401 +from .activation import tanhshrink # noqa: F401 +from .activation import thresholded_relu # noqa: F401 +from .activation import log_softmax # noqa: F401 +from .activation import glu # noqa: F401 +from .common import dropout # noqa: F401 +from .common import dropout2d # noqa: F401 +from .common import dropout3d # noqa: F401 +from .common import alpha_dropout # noqa: F401 +from .common import label_smooth # noqa: F401 +from .common import pad # noqa: F401 +from .common import cosine_similarity # noqa: F401 +from .common import unfold # noqa: F401 +from .common import interpolate # noqa: F401 +from .common import upsample # noqa: F401 +from .common import bilinear # noqa: F401 +from .conv import conv1d # noqa: F401 +from .conv import conv1d_transpose # noqa: F401 +from .common import linear # noqa: F401 +from .conv import conv2d # noqa: F401 +from .conv import conv2d_transpose # noqa: F401 +from .conv import conv3d # noqa: F401 +from .conv import conv3d_transpose # noqa: F401 +from .extension import diag_embed # noqa: F401 from .extension import sequence_mask -# from .lod import sequence_concat #DEFINE_ALIAS -# from .lod import sequence_conv #DEFINE_ALIAS -# from .lod import sequence_enumerate #DEFINE_ALIAS -# from .lod import sequence_expand_as #DEFINE_ALIAS -# from .lod import sequence_expand #DEFINE_ALIAS -# from .lod import sequence_first_step #DEFINE_ALIAS -# from .lod import sequence_last_step #DEFINE_ALIAS -# from .lod import sequence_mask #DEFINE_ALIAS -# from .lod import sequence_pad #DEFINE_ALIAS -# from .lod import sequence_pool #DEFINE_ALIAS -# from .lod import sequence_reshape #DEFINE_ALIAS -# from .lod import sequence_reverse #DEFINE_ALIAS -# from .lod import sequence_scatter #DEFINE_ALIAS -# from .lod import sequence_slice #DEFINE_ALIAS -# from .lod import sequence_softmax #DEFINE_ALIAS -# from .lod import sequence_unpad #DEFINE_ALIAS -# from .lod import array_length #DEFINE_ALIAS -# from .lod import array_read #DEFINE_ALIAS -# from .lod import array_write #DEFINE_ALIAS -# from .lod import create_array #DEFINE_ALIAS -# from .lod import hash #DEFINE_ALIAS -# from .lod import im2sequence #DEFINE_ALIAS -# from .lod import lod_append #DEFINE_ALIAS -# from .lod import lod_reset #DEFINE_ALIAS -# from .lod import reorder_lod_tensor_by_rank #DEFINE_ALIAS -# from .lod import tensor_array_to_tensor #DEFINE_ALIAS -# from .lod import dynamic_gru #DEFINE_ALIAS -# from .lod import dynamic_lstm #DEFINE_ALIAS -# from .lod import dynamic_lstmp #DEFINE_ALIAS -from .loss import 
binary_cross_entropy #DEFINE_ALIAS -from .loss import binary_cross_entropy_with_logits #DEFINE_ALIAS -# from .loss import bpr_loss #DEFINE_ALIAS -# from .loss import center_loss #DEFINE_ALIAS -#from .loss import cross_entropy #DEFINE_ALIAS -from .loss import cross_entropy #DEFINE_ALIAS -from .loss import dice_loss #DEFINE_ALIAS -from .loss import hsigmoid_loss #DEFINE_ALIAS -from .loss import kl_div #DEFINE_ALIAS -from .loss import l1_loss #DEFINE_ALIAS -from .loss import log_loss #DEFINE_ALIAS -from .loss import margin_ranking_loss #DEFINE_ALIAS -from .loss import mse_loss #DEFINE_ALIAS -from .loss import nll_loss #DEFINE_ALIAS -# from .loss import nce #DEFINE_ALIAS -from .loss import npair_loss #DEFINE_ALIAS -from .loss import sigmoid_focal_loss #DEFINE_ALIAS -# from .loss import smooth_l1 #DEFINE_ALIAS -from .loss import smooth_l1_loss #DEFINE_ALIAS -from .loss import softmax_with_cross_entropy #DEFINE_ALIAS -from .loss import square_error_cost #DEFINE_ALIAS -# from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS -from .loss import ctc_loss #DEFINE_ALIAS -# from .norm import data_norm #DEFINE_ALIAS -# from .norm import group_norm #DEFINE_ALIAS -from .norm import batch_norm #DEFINE_ALIAS -from .norm import instance_norm #DEFINE_ALIAS -from .norm import layer_norm #DEFINE_ALIAS -from .norm import local_response_norm #DEFINE_ALIAS -from .norm import normalize #DEFINE_ALIAS -# from .norm import spectral_norm #DEFINE_ALIAS -# from .pooling import pool2d #DEFINE_ALIAS -# from .pooling import pool3d #DEFINE_ALIAS -from .pooling import avg_pool1d #DEFINE_ALIAS -from .pooling import avg_pool2d #DEFINE_ALIAS -from .pooling import avg_pool3d #DEFINE_ALIAS -from .pooling import max_pool1d #DEFINE_ALIAS -from .pooling import max_pool2d #DEFINE_ALIAS -from .pooling import max_pool3d #DEFINE_ALIAS +from .loss import binary_cross_entropy # noqa: F401 +from .loss import binary_cross_entropy_with_logits # noqa: F401 +from .loss import cross_entropy # noqa: F401 +from .loss import dice_loss # noqa: F401 +from .loss import hsigmoid_loss # noqa: F401 +from .loss import kl_div # noqa: F401 +from .loss import l1_loss # noqa: F401 +from .loss import log_loss # noqa: F401 +from .loss import margin_ranking_loss # noqa: F401 +from .loss import mse_loss # noqa: F401 +from .loss import nll_loss # noqa: F401 +from .loss import npair_loss # noqa: F401 +from .loss import sigmoid_focal_loss # noqa: F401 +from .loss import smooth_l1_loss # noqa: F401 +from .loss import softmax_with_cross_entropy # noqa: F401 +from .loss import square_error_cost # noqa: F401 +from .loss import ctc_loss # noqa: F401 +from .norm import batch_norm # noqa: F401 +from .norm import instance_norm # noqa: F401 +from .norm import layer_norm # noqa: F401 +from .norm import local_response_norm # noqa: F401 +from .norm import normalize # noqa: F401 +from .pooling import avg_pool1d # noqa: F401 +from .pooling import avg_pool2d # noqa: F401 +from .pooling import avg_pool3d # noqa: F401 +from .pooling import max_pool1d # noqa: F401 +from .pooling import max_pool2d # noqa: F401 +from .pooling import max_pool3d # noqa: F401 -from .pooling import adaptive_max_pool1d #DEFINE_ALIAS -from .pooling import adaptive_max_pool2d #DEFINE_ALIAS -from .pooling import adaptive_max_pool3d #DEFINE_ALIAS -from .pooling import adaptive_avg_pool1d #DEFINE_ALIAS -from .pooling import adaptive_avg_pool2d #DEFINE_ALIAS -from .pooling import adaptive_avg_pool3d #DEFINE_ALIAS +from .pooling import adaptive_max_pool1d # noqa: F401 +from .pooling import 
adaptive_max_pool2d # noqa: F401 +from .pooling import adaptive_max_pool3d # noqa: F401 +from .pooling import adaptive_avg_pool1d # noqa: F401 +from .pooling import adaptive_avg_pool2d # noqa: F401 +from .pooling import adaptive_avg_pool3d # noqa: F401 -# from .rnn import rnn #DEFINE_ALIAS -# from .rnn import birnn #DEFINE_ALIAS -# from .rnn import gru_unit #DEFINE_ALIAS -# from .rnn import lstm #DEFINE_ALIAS -# from .rnn import lstm_unit #DEFINE_ALIAS -# from .vision import affine_channel #DEFINE_ALIAS -from .vision import affine_grid #DEFINE_ALIAS -# from .vision import anchor_generator #DEFINE_ALIAS -# from .vision import bipartite_match #DEFINE_ALIAS -# from .vision import box_clip #DEFINE_ALIAS -# from .vision import box_coder #DEFINE_ALIAS -# from .vision import box_decoder_and_assign #DEFINE_ALIAS -# from .vision import collect_fpn_proposals #DEFINE_ALIAS -# from .vision import deformable_conv #DEFINE_ALIAS -# from .vision import deformable_roi_pooling #DEFINE_ALIAS -# from .vision import density_prior_box #DEFINE_ALIAS -# from .vision import detection_output #DEFINE_ALIAS -# from .vision import distribute_fpn_proposals #DEFINE_ALIAS -# from .vision import fsp_matrix #DEFINE_ALIAS -# from .vision import generate_mask_labels #DEFINE_ALIAS -# from .vision import generate_proposal_labels #DEFINE_ALIAS -# from .vision import generate_proposals #DEFINE_ALIAS -from .vision import grid_sample #DEFINE_ALIAS -# from .vision import image_resize #DEFINE_ALIAS -# from .vision import image_resize_short #DEFINE_ALIAS -# from .vision import multi_box_head #DEFINE_ALIAS -from .vision import pixel_shuffle #DEFINE_ALIAS -# from .vision import prior_box #DEFINE_ALIAS -# from .vision import prroi_pool #DEFINE_ALIAS -# from .vision import psroi_pool #DEFINE_ALIAS -# from .vision import resize_bilinear #DEFINE_ALIAS -# from .vision import resize_nearest #DEFINE_ALIAS -# from .vision import resize_trilinear #DEFINE_ALIAS -# from .vision import retinanet_detection_output #DEFINE_ALIAS -# from .vision import retinanet_target_assign #DEFINE_ALIAS -# from .vision import roi_align #DEFINE_ALIAS -# from .vision import roi_perspective_transform #DEFINE_ALIAS -# from .vision import roi_pool #DEFINE_ALIAS -# from .vision import shuffle_channel #DEFINE_ALIAS -# from .vision import space_to_depth #DEFINE_ALIAS -# from .vision import yolo_box #DEFINE_ALIAS -# from .vision import yolov3_loss #DEFINE_ALIAS -from .input import one_hot #DEFINE_ALIAS -from .input import embedding #DEFINE_ALIAS -from ...fluid.layers import gather_tree -from ...fluid.layers import temporal_shift +from .vision import affine_grid # noqa: F401 +from .vision import grid_sample # noqa: F401 +from .vision import pixel_shuffle # noqa: F401 +from .input import one_hot # noqa: F401 +from .input import embedding # noqa: F401 +from ...fluid.layers import gather_tree # noqa: F401 +from ...fluid.layers import temporal_shift # noqa: F401 + +__all__ = [ #noqa + 'conv1d', + 'conv1d_transpose', + 'conv2d', + 'conv2d_transpose', + 'conv3d', + 'conv3d_transpose', + 'elu', + 'elu_', + 'gelu', + 'hardshrink', + 'hardtanh', + 'hardsigmoid', + 'hardswish', + 'leaky_relu', + 'log_sigmoid', + 'maxout', + 'prelu', + 'relu', + 'relu_', + 'relu6', + 'selu', + 'softmax', + 'softmax_', + 'softplus', + 'softshrink', + 'softsign', + 'sigmoid', + 'silu', + 'swish', + 'tanh', + 'tanh_', + 'tanhshrink', + 'thresholded_relu', + 'log_softmax', + 'glu', + 'diag_embed', + 'sequence_mask', + 'dropout', + 'dropout2d', + 'dropout3d', + 'alpha_dropout', + 'label_smooth', + 'linear', 
+ 'pad', + 'unfold', + 'interpolate', + 'upsample', + 'bilinear', + 'cosine_similarity', + 'avg_pool1d', + 'avg_pool2d', + 'avg_pool3d', + 'max_pool1d', + 'max_pool2d', + 'max_pool3d', + 'adaptive_avg_pool1d', + 'adaptive_avg_pool2d', + 'adaptive_avg_pool3d', + 'adaptive_max_pool1d', + 'adaptive_max_pool2d', + 'adaptive_max_pool3d', + 'binary_cross_entropy', + 'binary_cross_entropy_with_logits', + 'cross_entropy', + 'dice_loss', + 'hsigmoid_loss', + 'kl_div', + 'l1_loss', + 'log_loss', + 'mse_loss', + 'margin_ranking_loss', + 'nll_loss', + 'npair_loss', + 'sigmoid_focal_loss', + 'smooth_l1_loss', + 'softmax_with_cross_entropy', + 'square_error_cost', + 'ctc_loss', + 'affine_grid', + 'grid_sample', + 'local_response_norm', + 'pixel_shuffle', + 'embedding', + 'gather_tree', + 'one_hot', + 'normalize' +] diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index d74308dc9aa32a0960c59f2a2ff20e29741a4c88..cd8ee99baa23742b0c7c1ad5f20c7ce17e339790 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -12,53 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -# TODO: define activation functions of neural network -from ...fluid.layers import brelu #DEFINE_ALIAS -# from ...fluid.layers import erf #DEFINE_ALIAS -from ...fluid.layers import maxout #DEFINE_ALIAS -# from ...fluid.layers import soft_relu #DEFINE_ALIAS -from ...fluid.layers import swish #DEFINE_ALIAS -from ...fluid.layers import sigmoid #DEFINE_ALIAS -from ...tensor.math import tanh #DEFINE_ALIAS -from ...tensor.math import tanh_ #DEFINE_ALIAS +from ...fluid.layers import sigmoid # noqa: F401 +from ...tensor.math import tanh # noqa: F401 +from ...tensor.math import tanh_ # noqa: F401 from ...tensor.manipulation import _print_warning_in_static_mode from ...tensor.manipulation import chunk from ...tensor.math import multiply -__all__ = [ - 'brelu', - 'elu', - 'elu_', - 'gelu', - 'hardshrink', - 'hardtanh', - 'hardsigmoid', - 'hardswish', - 'leaky_relu', - 'log_sigmoid', - 'maxout', - 'prelu', - 'relu', - 'relu_', - 'relu6', - 'selu', - 'softmax', - 'softmax_', - 'softplus', - 'softshrink', - 'softsign', - 'sigmoid', - 'silu' - 'swish', - 'tanh', - 'tanh_', - 'tanhshrink', - 'thresholded_relu', - 'log_softmax', - 'glu', -] - import warnings from ...fluid.layer_helper import LayerHelper from ...fluid.framework import in_dygraph_mode, convert_np_dtype_to_dtype_ diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 1cc8ef6c39b15e348bc76cb014bde82b1c12e9bf..7379c7a5f67bd0a0a5661f5da41c46dca3bb16fb 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -20,44 +20,20 @@ from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat from ...fluid.layers import core from ...fluid import dygraph_utils # TODO: define the common functions to build a neural network -# from ...fluid import one_hot #DEFINE_ALIAS -# from ...fluid.layers import pad2d #DEFINE_ALIAS -from ...fluid.layers import unfold #DEFINE_ALIAS -from ...fluid.layers import squeeze #DEFINE_ALIAS -from ...fluid.layers import unsqueeze #DEFINE_ALIAS +from ...fluid.layers import unfold # noqa: F401 +from ...fluid.layers import squeeze +from ...fluid.layers import unsqueeze from ...tensor import clip from ...tensor import sum from ...tensor import sqrt -from ...tensor import sum #DEFINE_ALIAS -from ...tensor import sqrt #DEFINE_ALIAS 
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype from ...fluid.framework import Variable, in_dygraph_mode, _varbase_creator -#from ...fluid.layers import fc #DEFINE_ALIAS -# from ...fluid.layers import pad_constant_like #DEFINE_ALIAS from ...fluid.framework import in_dygraph_mode from ...fluid import core, dygraph_utils from ...fluid import core, layers from ...fluid.data_feeder import check_variable_and_dtype -__all__ = [ - 'dropout', - 'dropout2d', - 'dropout3d', - 'alpha_dropout', - # 'embedding', - # 'fc', - 'label_smooth', - 'linear', - 'pad', - 'unfold', - # 'bilinear_tensor_product', - 'interpolate', - 'upsample', - 'bilinear', - 'cosine_similarity', -] - def interpolate(x, size=None, diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index a8d6a6cc38df2d8fec42f675bec814c8f7518d34..800c8204973723be905599b35d63d993bebbba94 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -13,15 +13,6 @@ # limitations under the License. from __future__ import print_function -__all__ = [ - 'conv1d', - 'conv1d_transpose', - 'conv2d', - 'conv2d_transpose', - 'conv3d', - 'conv3d_transpose', -] - import numpy as np from ...device import get_cudnn_version from ...fluid.framework import Variable, in_dygraph_mode diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index b004d79a877e7edbccc2245e5056753ed6b14bfb..7900f903e7fd2f5bc298d18b28fd602f309238e9 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -14,8 +14,6 @@ # TODO: define the extention functions -__all__ = ['diag_embed', 'sequence_mask'] - import numpy as np from ...fluid.data_feeder import check_dtype from ...fluid.layer_helper import LayerHelper diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py index b88a2b042ff48160c12aedcca6f12591c154cd0e..4fff9cda4be33abd5db3384837efc84c7a1063a7 100644 --- a/python/paddle/nn/functional/input.py +++ b/python/paddle/nn/functional/input.py @@ -19,8 +19,6 @@ from ...fluid.layer_helper import LayerHelper from ...fluid.layers import core from ...fluid.data_feeder import check_variable_and_dtype, check_dtype -__all__ = ['one_hot', 'embedding'] - def one_hot(x, num_classes, name=None): """ diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index ca0ad06532d27e6508cb3eabcfb82de70785f8df..bb2d8005f4e3186d0531ca2516e07d169f76afc5 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -24,14 +24,14 @@ import paddle import paddle.fluid as fluid from ...fluid.framework import core, in_dygraph_mode from ...fluid.layers.nn import _elementwise_op_in_dygraph -from ...fluid.layers import dice_loss #DEFINE_ALIAS -from ...fluid.layers import log_loss #DEFINE_ALIAS -from ...fluid.layers import npair_loss #DEFINE_ALIAS +from ...fluid.layers import dice_loss # noqa: F401 +from ...fluid.layers import log_loss # noqa: F401 +from ...fluid.layers import npair_loss # noqa: F401 from ...fluid.layers import reshape -from ...fluid.layers import softmax_with_cross_entropy as fluid_softmax_with_cross_entropy #DEFINE_ALIAS -from ...fluid.layers import square_error_cost #DEFINE_ALIAS +from ...fluid.layers import softmax_with_cross_entropy as fluid_softmax_with_cross_entropy +from ...fluid.layers import square_error_cost # noqa: F401 -from ...fluid.layers import edit_distance #DEFINE_ALIAS +from ...fluid.layers import edit_distance # noqa: 
F401 from ...fluid.layers import huber_loss from ...fluid.layer_helper import LayerHelper from ...fluid.framework import in_dygraph_mode @@ -39,27 +39,6 @@ from ...fluid.framework import _varbase_creator from ...fluid.framework import Variable from paddle.utils import deprecated -__all__ = [ - 'binary_cross_entropy', - 'binary_cross_entropy_with_logits', - 'cross_entropy', - 'dice_loss', - 'hsigmoid_loss', - 'kl_div', - 'l1_loss', - 'log_loss', - 'mse_loss', - 'margin_ranking_loss', - # 'nce', - 'nll_loss', - 'npair_loss', - 'sigmoid_focal_loss', - 'smooth_l1_loss', - 'softmax_with_cross_entropy', - 'square_error_cost', - 'ctc_loss', -] - def binary_cross_entropy(input, label, weight=None, reduction='mean', name=None): @@ -1312,7 +1291,7 @@ def cross_entropy(input, Indicate whether compute softmax before cross_entropy. Default is ``True``. - - **name** (str,optional) + - **name** (str, optional) The name of the operator. Default is ``None`` . For more information, please refer to :ref:`api_guide_Name` . diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index 73df03e3714c7926f267bbe6e105096964d8f3b0..dddc4c66d591ccbe86722492a571fd7d65570559 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -22,19 +22,8 @@ from ...framework import create_parameter from ...fluid.initializer import Constant from ...fluid.param_attr import ParamAttr from ...fluid import core, dygraph_utils - import numbers -__all__ = [ - 'batch_norm', - # 'data_norm', - 'instance_norm', - 'layer_norm', - 'local_response_norm', - 'normalize', - # 'spectral_norm' -] - def normalize(x, p=2, axis=1, epsilon=1e-12, name=None): r""" diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index 5f3642710ae0adfbdb53f7b5adc81c8b8395a924..27a66c629cafaa283346b07750392710d525a32c 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -18,21 +18,6 @@ from ...fluid.framework import in_dygraph_mode from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze from ...fluid.data_feeder import check_type, check_variable_and_dtype -__all__ = [ - 'avg_pool1d', - 'avg_pool2d', - 'avg_pool3d', - 'max_pool1d', - 'max_pool2d', - 'max_pool3d', - 'adaptive_avg_pool1d', - 'adaptive_avg_pool2d', - 'adaptive_avg_pool3d', - 'adaptive_max_pool1d', - 'adaptive_max_pool2d', - 'adaptive_max_pool3d', -] - def _is_list_or_tuple(input): return isinstance(input, (list, tuple)) diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index 032d5b47eda077ab3a1cb9123d958568bf20617a..cb8a817023d22c243349f564bb2bd13f072da957 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -19,43 +19,6 @@ from ...fluid.data_feeder import check_variable_and_dtype from ...fluid import dygraph_utils import numpy as np -# TODO: define specitial functions used in computer vision task -# from ...fluid.layers import affine_channel #DEFINE_ALIAS -# from ...fluid.layers import anchor_generator #DEFINE_ALIAS -# from ...fluid.layers import bipartite_match #DEFINE_ALIAS -# from ...fluid.layers import box_clip #DEFINE_ALIAS -# from ...fluid.layers import box_coder #DEFINE_ALIAS -# from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS -# from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS -# from ...fluid.layers import deformable_roi_pooling #DEFINE_ALIAS -# from ...fluid.layers import density_prior_box #DEFINE_ALIAS -# from 
...fluid.layers import detection_output #DEFINE_ALIAS -# from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS -# from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS -# from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS -# from ...fluid.layers import generate_proposals #DEFINE_ALIAS -# from ...fluid.layers import image_resize #DEFINE_ALIAS -# from ...fluid.layers import prior_box #DEFINE_ALIAS -# from ...fluid.layers import prroi_pool #DEFINE_ALIAS -# from ...fluid.layers import psroi_pool #DEFINE_ALIAS -# from ...fluid.layers import resize_bilinear #DEFINE_ALIAS -# from ...fluid.layers import resize_nearest #DEFINE_ALIAS -# from ...fluid.layers import resize_trilinear #DEFINE_ALIAS -# from ...fluid.layers import roi_align #DEFINE_ALIAS -# from ...fluid.layers import roi_pool #DEFINE_ALIAS -# from ...fluid.layers import space_to_depth #DEFINE_ALIAS -# from ...fluid.layers import yolo_box #DEFINE_ALIAS -# from ...fluid.layers import yolov3_loss #DEFINE_ALIAS -# from ...fluid.layers import fsp_matrix #DEFINE_ALIAS -# from ...fluid.layers import image_resize_short #DEFINE_ALIAS -# from ...fluid.layers import pixel_shuffle #DEFINE_ALIAS -# from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS -# from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS -# from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS -# from ...fluid.layers import shuffle_channel #DEFINE_ALIAS - -__all__ = ['affine_grid', 'grid_sample', 'pixel_shuffle'] - def affine_grid(theta, out_shape, align_corners=True, name=None): """ diff --git a/python/paddle/nn/initializer/__init__.py b/python/paddle/nn/initializer/__init__.py index c128a1b401b2d85c8076d773c441c2182b8327a4..03e91f80dd139ced85c8a0e011d776e80d43f5a8 100644 --- a/python/paddle/nn/initializer/__init__.py +++ b/python/paddle/nn/initializer/__init__.py @@ -13,36 +13,34 @@ # limitations under the License. # TODO: define the initializers to create a Parameter in neural network -from ...fluid.initializer import Bilinear #DEFINE_ALIAS -from ...fluid.initializer import set_global_initializer #DEFINE_ALIAS +from ...fluid.initializer import Bilinear # noqa: F401 +from ...fluid.initializer import set_global_initializer # noqa: F401 -from . import constant -from .constant import Constant #DEFINE_ALIAS +from .constant import Constant # noqa: F401 -from . import kaiming -from .kaiming import KaimingNormal #DEFINE_ALIAS -from .kaiming import KaimingUniform #DEFINE_ALIAS +from .kaiming import KaimingNormal # noqa: F401 +from .kaiming import KaimingUniform # noqa: F401 -__all__ = ['Bilinear', 'set_global_initializer'] +from .xavier import XavierNormal # noqa: F401 +from .xavier import XavierUniform # noqa: F401 -__all__ += constant.__all__ -__all__ += kaiming.__all__ +from .assign import Assign # noqa: F401 -from . import xavier -from .xavier import XavierNormal #DEFINE_ALIAS -from .xavier import XavierUniform #DEFINE_ALIAS +from .normal import Normal # noqa: F401 +from .normal import TruncatedNormal # noqa: F401 -from . import assign -from .assign import Assign #DEFINE_ALIAS +from .uniform import Uniform # noqa: F401 -from . import normal -from .normal import Normal #DEFINE_ALIAS -from .normal import TruncatedNormal #DEFINE_ALIAS - -from . 
import uniform -from .uniform import Uniform #DEFINE_ALIAS - -__all__ += xavier.__all__ -__all__ += assign.__all__ -__all__ += normal.__all__ -__all__ += uniform.__all__ +__all__ = [ #noqa + 'Bilinear', + 'Constant', + 'KaimingUniform', + 'KaimingNormal', + 'XavierNormal', + 'XavierUniform', + 'Assign', + 'Normal', + 'TruncatedNormal', + 'Uniform', + 'set_global_initializer' +] diff --git a/python/paddle/nn/initializer/assign.py b/python/paddle/nn/initializer/assign.py index 94c4ddc1938823653db6ef78a823430928c724ff..642919f3540753f499190a16637ae67836f36d9b 100644 --- a/python/paddle/nn/initializer/assign.py +++ b/python/paddle/nn/initializer/assign.py @@ -19,8 +19,6 @@ from ...fluid.core import VarDesc from ...fluid.data_feeder import check_type from ...fluid.initializer import NumpyArrayInitializer -__all__ = ['Assign'] - class Assign(NumpyArrayInitializer): """Init an parameter with a numpy array, list, or tensor. diff --git a/python/paddle/nn/initializer/constant.py b/python/paddle/nn/initializer/constant.py index 6d21ddae0d16b5003bc6766b4106dd937727c2b1..aec3e82aab62b7591d93b83fea137147672f820c 100644 --- a/python/paddle/nn/initializer/constant.py +++ b/python/paddle/nn/initializer/constant.py @@ -15,8 +15,6 @@ # TODO: define the initializers of Constant in neural network from ...fluid.initializer import ConstantInitializer -__all__ = ['Constant'] - class Constant(ConstantInitializer): """Implement the constant initializer. diff --git a/python/paddle/nn/initializer/kaiming.py b/python/paddle/nn/initializer/kaiming.py index 7e2b6f787f85316c9ad4c3bedf91eef3b19cd50d..712bffccda102fc5ca1edaf85c641850027a4f01 100644 --- a/python/paddle/nn/initializer/kaiming.py +++ b/python/paddle/nn/initializer/kaiming.py @@ -15,8 +15,6 @@ # TODO: define the initializers of Kaiming functions in neural network from ...fluid.initializer import MSRAInitializer -__all__ = ['KaimingUniform', 'KaimingNormal'] - class KaimingNormal(MSRAInitializer): r"""Implements the Kaiming Normal initializer diff --git a/python/paddle/nn/initializer/normal.py b/python/paddle/nn/initializer/normal.py index a572d0e2c9216040e3ffa7e1c02841ebc1fc33ae..c009df780054ed9b727479d4029d002a3b1a521a 100644 --- a/python/paddle/nn/initializer/normal.py +++ b/python/paddle/nn/initializer/normal.py @@ -15,8 +15,6 @@ from ...fluid.initializer import NormalInitializer from ...fluid.initializer import TruncatedNormalInitializer -__all__ = ['Normal', 'TruncatedNormal'] - class Normal(NormalInitializer): """The Random Normal (Gaussian) distribution initializer. diff --git a/python/paddle/nn/initializer/uniform.py b/python/paddle/nn/initializer/uniform.py index a5d7d34efcf664a5bd46d7d3f06e2c542a8b4ef9..e54a4d2187b8d7742ab2a877a9faee97abc33d72 100644 --- a/python/paddle/nn/initializer/uniform.py +++ b/python/paddle/nn/initializer/uniform.py @@ -14,8 +14,6 @@ from ...fluid.initializer import UniformInitializer -__all__ = ['Uniform'] - class Uniform(UniformInitializer): """The random uniform distribution initializer. 
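For readers skimming this patch, a minimal usage sketch of the reworked paddle.nn.initializer package follows: the classes are re-exported from the package root (with "# noqa: F401" to silence flake8's imported-but-unused check) and the new explicit __all__ defines the public star-import surface. It assumes a paddle build that already contains these changes; the Linear/ParamAttr wiring is illustrative only and is not part of this patch.

    import paddle
    import paddle.nn as nn

    # Initializers are reachable from the package root thanks to the re-exports above.
    init = nn.initializer.KaimingNormal()

    # The explicit __all__ added in this patch is what controls star-imports.
    assert 'KaimingNormal' in nn.initializer.__all__

    # Illustrative only: hand the initializer to a layer through a ParamAttr.
    linear = nn.Linear(4, 3, weight_attr=paddle.ParamAttr(initializer=init))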
diff --git a/python/paddle/nn/initializer/xavier.py b/python/paddle/nn/initializer/xavier.py index 821a6984753105162e878c879cd5b960d2aa80e1..01a4a8887b489ab3492603d365da09ae5eb7ca22 100644 --- a/python/paddle/nn/initializer/xavier.py +++ b/python/paddle/nn/initializer/xavier.py @@ -14,8 +14,6 @@ from ...fluid.initializer import XavierInitializer -__all__ = ['XavierNormal', 'XavierUniform'] - class XavierNormal(XavierInitializer): r""" diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py index 17c4ca5c5d11d20490c25616176f71dcf46c389c..64f0391fb6533e817f48a5cd9c626751856c58f6 100644 --- a/python/paddle/nn/layer/__init__.py +++ b/python/paddle/nn/layer/__init__.py @@ -14,90 +14,70 @@ # TODO: define activation functions of neural network -from . import activation -from . import loss -from . import conv -from . import activation -from . import norm -from . import rnn -from . import vision -from . import distance -from . import transformer -from . import container +from . import rnn # noqa: F401 +from . import transformer # noqa: F401 +from . import container # noqa: F401 -from .activation import * -from .loss import * -from .conv import * -from .activation import * -from .norm import * -from .rnn import * -from .vision import * +from .activation import PReLU # noqa: F401 +from .activation import ReLU # noqa: F401 +from .activation import ReLU6 # noqa: F401 +from .activation import LeakyReLU # noqa: F401 +from .activation import Sigmoid # noqa: F401 +from .activation import Softmax # noqa: F401 +from .activation import LogSoftmax # noqa: F401 +from .common import Bilinear # noqa: F401 +from .common import Pad1D # noqa: F401 +from .common import Pad2D # noqa: F401 +from .common import Pad3D # noqa: F401 +from .common import CosineSimilarity # noqa: F401 +from .common import Embedding # noqa: F401 +from .common import Linear # noqa: F401 +from .common import Flatten # noqa: F401 +from .common import Upsample # noqa: F401 +from .common import Dropout # noqa: F401 +from .common import Dropout2D # noqa: F401 +from .common import Dropout3D # noqa: F401 +from .common import AlphaDropout # noqa: F401 +from .common import Upsample # noqa: F401 +from .common import UpsamplingBilinear2D # noqa: F401 +from .common import UpsamplingNearest2D # noqa: F401 +from .pooling import AvgPool1D # noqa: F401 +from .pooling import AvgPool2D # noqa: F401 +from .pooling import AvgPool3D # noqa: F401 +from .pooling import MaxPool1D # noqa: F401 +from .pooling import MaxPool2D # noqa: F401 +from .pooling import MaxPool3D # noqa: F401 +from .pooling import AdaptiveAvgPool1D # noqa: F401 +from .pooling import AdaptiveAvgPool2D # noqa: F401 +from .pooling import AdaptiveAvgPool3D # noqa: F401 +from .pooling import AdaptiveMaxPool1D # noqa: F401 +from .pooling import AdaptiveMaxPool2D # noqa: F401 +from .pooling import AdaptiveMaxPool3D # noqa: F401 +from .conv import Conv1D # noqa: F401 +from .conv import Conv2D # noqa: F401 +from .conv import Conv3D # noqa: F401 +from .conv import Conv1DTranspose # noqa: F401 +from .conv import Conv2DTranspose # noqa: F401 +from .conv import Conv3DTranspose # noqa: F401 +from .loss import BCEWithLogitsLoss # noqa: F401 +from .loss import CrossEntropyLoss # noqa: F401 +from .loss import MSELoss # noqa: F401 +from .loss import L1Loss # noqa: F401 +from .loss import NLLLoss # noqa: F401 +from .loss import BCELoss # noqa: F401 +from .loss import KLDivLoss # noqa: F401 +from .loss import MarginRankingLoss # noqa: F401 +from .loss import CTCLoss # noqa: F401 +from 
.loss import SmoothL1Loss # noqa: F401 +from .norm import BatchNorm1D # noqa: F401 +from .norm import BatchNorm2D # noqa: F401 +from .norm import BatchNorm3D # noqa: F401 +from .norm import SyncBatchNorm # noqa: F401 +from .norm import GroupNorm # noqa: F401 +from .norm import LayerNorm # noqa: F401 +from .norm import SpectralNorm # noqa: F401 +from .norm import LocalResponseNorm # noqa: F401 -from .transformer import * -from .activation import PReLU #DEFINE_ALIAS -from .activation import ReLU #DEFINE_ALIAS -from .activation import LeakyReLU #DEFINE_ALIAS -from .activation import Sigmoid #DEFINE_ALIAS -from .activation import Softmax #DEFINE_ALIAS -from .activation import LogSoftmax #DEFINE_ALIAS -from .common import Bilinear #DEFINE_ALIAS -from .common import Pad1D #DEFINE_ALIAS -from .common import Pad2D #DEFINE_ALIAS -from .common import Pad3D #DEFINE_ALIAS -from .common import CosineSimilarity #DEFINE_ALIAS -from .common import Embedding #DEFINE_ALIAS -from .common import Linear #DEFINE_ALIAS -from .common import Flatten #DEFINE_ALIAS -from .common import Upsample #DEFINE_ALIAS -from .common import Dropout #DEFINE_ALIAS -from .common import Dropout2D #DEFINE_ALIAS -from .common import Dropout3D #DEFINE_ALIAS -from .common import AlphaDropout #DEFINE_ALIAS -from .common import Upsample #DEFINE_ALIAS -from .common import UpsamplingBilinear2D #DEFINE_ALIAS -from .common import UpsamplingNearest2D #DEFINE_ALIAS -from .pooling import AvgPool1D #DEFINE_ALIAS -from .pooling import AvgPool2D #DEFINE_ALIAS -from .pooling import AvgPool3D #DEFINE_ALIAS -from .pooling import MaxPool1D #DEFINE_ALIAS -from .pooling import MaxPool2D #DEFINE_ALIAS -from .pooling import MaxPool3D #DEFINE_ALIAS -from .pooling import AdaptiveAvgPool1D #DEFINE_ALIAS -from .pooling import AdaptiveAvgPool2D #DEFINE_ALIAS -from .pooling import AdaptiveAvgPool3D #DEFINE_ALIAS -from .pooling import AdaptiveMaxPool1D #DEFINE_ALIAS -from .pooling import AdaptiveMaxPool2D #DEFINE_ALIAS -from .pooling import AdaptiveMaxPool3D #DEFINE_ALIAS -from .conv import Conv1D #DEFINE_ALIAS -from .conv import Conv2D #DEFINE_ALIAS -from .conv import Conv3D #DEFINE_ALIAS -from .conv import Conv1DTranspose #DEFINE_ALIAS -from .conv import Conv2DTranspose #DEFINE_ALIAS -from .conv import Conv3DTranspose #DEFINE_ALIAS -# from .conv import TreeConv #DEFINE_ALIAS -# from .conv import Conv1D #DEFINE_ALIAS -# from .loss import NCELoss #DEFINE_ALIAS -from .loss import BCEWithLogitsLoss #DEFINE_ALIAS -from .loss import CrossEntropyLoss #DEFINE_ALIAS -from .loss import MSELoss #DEFINE_ALIAS -from .loss import L1Loss #DEFINE_ALIAS -from .loss import NLLLoss #DEFINE_ALIAS -from .loss import BCELoss #DEFINE_ALIAS -from .loss import KLDivLoss #DEFINE_ALIAS -from .loss import MarginRankingLoss #DEFINE_ALIAS -from .loss import CTCLoss #DEFINE_ALIAS -from .loss import SmoothL1Loss #DEFINE_ALIAS -from .norm import BatchNorm #DEFINE_ALIAS -from .norm import SyncBatchNorm #DEFINE_ALIAS -from .norm import GroupNorm #DEFINE_ALIAS -from .norm import LayerNorm #DEFINE_ALIAS -from .norm import SpectralNorm #DEFINE_ALIAS -#from .norm import InstanceNorm #DEFINE_ALIAS -from .norm import LocalResponseNorm #DEFINE_ALIAS -# from .rnn import RNNCell #DEFINE_ALIAS -# from .rnn import GRUCell #DEFINE_ALIAS -# from .rnn import LSTMCell #DEFINE_ALIAS - -from .vision import PixelShuffle #DEFINE_ALIAS -from .distance import PairwiseDistance #DEFINE_ALIAS -from .container import LayerDict #DEFINE_ALIAS +from .vision import PixelShuffle # noqa: F401 +from .distance import 
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 2dfb3acca68e16fb95efb012895bf935cee11440..356b22c632cf5199319e3d46ba764f04833632c3 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -21,20 +21,6 @@
 import paddle
 from .. import functional as F
 from paddle.fluid.framework import core, in_dygraph_mode, _varbase_creator
 
-__all__ = [
-    'BCEWithLogitsLoss',
-    'CrossEntropyLoss',
-    'HSigmoidLoss',
-    'MSELoss',
-    'L1Loss',
-    'NLLLoss',
-    'BCELoss',
-    'KLDivLoss',
-    'MarginRankingLoss',
-    'CTCLoss',
-    'SmoothL1Loss',
-]
-
 
 class BCEWithLogitsLoss(fluid.dygraph.Layer):
     r"""
@@ -295,7 +281,7 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
             Indicate whether compute softmax before cross_entropy.
             Default is ``True``.
 
-        - **name** (str,optional)
+        - **name** (str, optional)
 
             The name of the operator. Default is ``None`` .
             For more information, please refer to :ref:`api_guide_Name` .
@@ -318,7 +304,7 @@
 
         - **label** (Tensor)
 
-            1. If soft_label=False,the shape is
+            1. If soft_label=False, the shape is
             :math:`[N_1, N_2, ..., N_k]` or :math:`[N_1, N_2, ..., N_k, 1]`, k >= 1.
             the data type is int32, int64, float32, float64, where each value is [0, C-1].
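# A usage sketch matching the docstring wording fixed above: with the default
# soft_label=False, labels are integer class indices, so logits of shape [N, C]
# pair with a label tensor of shape [N] (or [N, 1]) holding values in [0, C-1].
# The sizes below are arbitrary.
import paddle

logits = paddle.randn([4, 10])                            # N=4 samples, C=10 classes
labels = paddle.to_tensor([1, 0, 9, 3], dtype='int64')    # shape [N], values in [0, C-1]

loss_fn = paddle.nn.CrossEntropyLoss()                    # use_softmax=True by default
loss = loss_fn(logits, labels)                            # scalar loss (mean reduction)
print(loss)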
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 0b0b2bf7b9b272ff5a41647b5d711b0069b2892a..970d68e8263432105349e9dfc699d0a0b2ea1115 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -28,13 +28,10 @@
 # TODO: define normalization api
 
 import six
-#from ...fluid.dygraph.nn import InstanceNorm
-from ...fluid.dygraph import BatchNorm #DEFINE_ALIAS
-#from ...fluid.dygraph import GroupNorm #DEFINE_ALIAS
+from ...fluid.dygraph import BatchNorm # noqa: F401
 
-#from ...fluid.dygraph import LayerNorm #DEFINE_ALIAS
-from ...fluid.dygraph import SpectralNorm #DEFINE_ALIAS
+from ...fluid.dygraph import SpectralNorm # noqa: F401
 
 from ...fluid.dygraph import layers
 from ...framework import get_default_dtype, set_default_dtype
@@ -53,12 +50,6 @@
 import warnings
 from ...fluid.dygraph.base import no_grad
 from .. import functional as F
 
-__all__ = [
-    'BatchNorm', 'GroupNorm', 'LayerNorm', 'SpectralNorm', 'BatchNorm1D',
-    'BatchNorm2D', 'BatchNorm3D', 'InstanceNorm1D', 'InstanceNorm2D',
-    'InstanceNorm3D', 'SyncBatchNorm', 'LocalResponseNorm'
-]
-
 
 class _InstanceNormBase(layers.Layer):
     """
diff --git a/python/paddle/nn/layer/pooling.py b/python/paddle/nn/layer/pooling.py
index cdb87a1cb3920748127032aef31f7710bdfc5641..5916fd7c69eb0be56dbeef2feaa6d4f8bb969b15 100755
--- a/python/paddle/nn/layer/pooling.py
+++ b/python/paddle/nn/layer/pooling.py
@@ -16,21 +16,6 @@
 from ...fluid.dygraph import layers
 from ...fluid.layer_helper import LayerHelper
 from .. import functional as F
 
-__all__ = [
-    'AvgPool1D',
-    'AvgPool2D',
-    'AvgPool3D',
-    'MaxPool1D',
-    'MaxPool2D',
-    'MaxPool3D',
-    'AdaptiveAvgPool1D',
-    'AdaptiveAvgPool2D',
-    'AdaptiveAvgPool3D',
-    'AdaptiveMaxPool1D',
-    'AdaptiveMaxPool2D',
-    'AdaptiveMaxPool3D',
-]
-
 
 class AvgPool1D(layers.Layer):
     r"""
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index 964cfa74ebf0881cb7ceeed4c0e37d64f5005f98..a7539b5b09571c843247e1fce61d170aa71d91a0 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -33,18 +33,6 @@
 from paddle.fluid.layers import utils
 from paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as
 from paddle.fluid.data_feeder import convert_dtype
 
-__all__ = [
-    'RNNCellBase',
-    'SimpleRNNCell',
-    'LSTMCell',
-    'GRUCell',
-    'RNN',
-    'BiRNN',
-    'SimpleRNN',
-    'LSTM',
-    'GRU',
-]
-
 
 def split_states(states, bidirectional=False, state_components=1):
     r"""
diff --git a/python/paddle/nn/layer/transformer.py b/python/paddle/nn/layer/transformer.py
index fe70a99ffb518fa17595c8a9a1817adcec457493..752870f3d0a28bd0e8db29138f6a5a901076515b 100644
--- a/python/paddle/nn/layer/transformer.py
+++ b/python/paddle/nn/layer/transformer.py
@@ -13,14 +13,6 @@
 # limitations under the License.
 
 # TODO: define the classes of Transformer neural network
-__all__ = [
-    'MultiHeadAttention',
-    'TransformerEncoderLayer',
-    'TransformerEncoder',
-    'TransformerDecoderLayer',
-    'TransformerDecoder',
-    'Transformer',
-]
 
 import copy
 import collections
diff --git a/python/paddle/nn/layer/vision.py b/python/paddle/nn/layer/vision.py
index d9c948a848a939c0427c14aee793e2c9c439c47b..e66e122be5259a8861b22d06258f34e1af89b6c9 100644
--- a/python/paddle/nn/layer/vision.py
+++ b/python/paddle/nn/layer/vision.py
@@ -17,8 +17,6 @@
 from ...fluid.dygraph import layers
 from .. import functional
 
-__all__ = ['PixelShuffle']
-
 
 class PixelShuffle(layers.Layer):
     """
diff --git a/python/paddle/nn/utils/__init__.py b/python/paddle/nn/utils/__init__.py
index 6562ac35e1e3180db671f90188f1304f07864189..bf2573d2cbc2d3a5229411abb857dd9d23031889 100644
--- a/python/paddle/nn/utils/__init__.py
+++ b/python/paddle/nn/utils/__init__.py
@@ -12,5 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from . import weight_norm_hook
-from .weight_norm_hook import weight_norm, remove_weight_norm
+from .weight_norm_hook import weight_norm, remove_weight_norm # noqa: F401
+
+__all__ = [ #noqa
+    'weight_norm', 'remove_weight_norm'
+]
diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py
index fdf7a1b5bb2e2dc7e5e729a15c76fcbbb32ca12d..23df38ca08c45a4190a56e26fb47637eb349c407 100755
--- a/python/paddle/nn/utils/weight_norm_hook.py
+++ b/python/paddle/nn/utils/weight_norm_hook.py
@@ -19,8 +19,6 @@
 from ...fluid import layers as F
 from ...fluid.layer_helper import LayerHelper
 from ...fluid.data_feeder import check_variable_and_dtype
 
-__all__ = ['weight_norm', 'remove_weight_norm']
-
 
 def l2_norm(x, axis, epsilon=1e-12, name=None):
     if len(x.shape) == 1:
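# A usage sketch for the two helpers that paddle.nn.utils now lists in __all__
# above; the Linear sizes are arbitrary.
import paddle
from paddle.nn.utils import weight_norm, remove_weight_norm

layer = paddle.nn.Linear(3, 5)
layer = weight_norm(layer, name='weight', dim=0)   # reparameterize weight as g * v / ||v||
print([n for n, _ in layer.named_parameters()])    # typically shows weight_g and weight_v
remove_weight_norm(layer)                          # fold the factors back into weight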
diff --git a/python/paddle/utils/deprecated.py b/python/paddle/utils/deprecated.py
index daa2826ca360f120cea5f0fd0afecc8dc40b0b7e..a46f1ae3a2c2e3a255a1e7fefec25907c8c48e4c 100755
--- a/python/paddle/utils/deprecated.py
+++ b/python/paddle/utils/deprecated.py
@@ -83,13 +83,14 @@ def deprecated(update_to="", since="", reason=""):
            2. since version is empty, in this case, API is deprecated in all versions.
            3. current version is newer than since version.
            """
-            msg = "\033[93mWarning %s \033[0m" % (msg)
+            warningmsg = "\033[93mWarning %s \033[0m" % (msg)
             v_current = [int(i) for i in paddle.__version__.split(".")]
             v_current += [0] * (4 - len(v_current))
             v_since = [int(i) for i in _since.split(".")]
             v_since += [0] * (4 - len(v_since))
             if paddle.__version__ == "0.0.0" or _since == "" or v_current >= v_since:
-                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+                warnings.warn(
+                    warningmsg, category=DeprecationWarning, stacklevel=2)
                 return func(*args, **kwargs)
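# A standalone sketch of why the rename in deprecated.py above is needed:
# assigning to a name inside a nested function makes that name local to the
# nested function, so "msg = ... % (msg)" reads msg before it is bound and
# raises UnboundLocalError instead of using the enclosing function's msg.
# Binding the formatted text to a new name (warningmsg) avoids the shadowing.
def outer(msg):
    def broken():
        msg = "Warning %s" % (msg)           # UnboundLocalError at runtime
        return msg

    def fixed():
        warningmsg = "Warning %s" % (msg)    # reads msg from the enclosing scope
        return warningmsg

    return broken, fixed

broken, fixed = outer("this API is deprecated")
print(fixed())
try:
    broken()
except UnboundLocalError as exc:
    print("broken():", exc)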