未验证 提交 938a5a53 编写于 作者: Z zhiboniu 提交者: GitHub

cherry-pick from develop: update 2.0 public api in nn #31912 (#32621)

* update 2.0 public api in nn

* replace Chinese character cause error in ci;synchronization with pr:#32588 to avoid 'ascii' codec in python2

* numbers used in paddle.nn.functional.norm but not imported
上级 54ab656c
......@@ -527,7 +527,7 @@ def create_heter_program(program, config, heter_program, heter_ops,
# This function mainly includes the following contents:
# 1. For every heter block:
# a) copy heter device op from origin program
# b) create variables which belong to heter op
# b) create variables which belong to heter op:
# -> if variable is persistable, clone it in global_scope
# -> if variable is temp, create it in heter block
# c) create communicate related op as follow:
......
......@@ -58,7 +58,7 @@ def parse_args():
default="[0,8)",
help="The number of the Ascend accelerators used. please note that the Ascend accelerators"
"used must be continuous, such [0,4) means to use four chips "
"0,1,2,3; [0,1) means to use chip 0; The first four chips are"
"0,1,2,3; [0,1) means to use chip 0; The first four chips are"
"a group, and the last four chips are a group. In addition to"
"the [0,8) chips are allowed, other cross-group such as [3,6)"
"are prohibited.")
......
......@@ -15,148 +15,273 @@
# TODO: import all neural network related api under this directory,
# including layers, linear, conv, rnn etc.
from .layer import norm
from .functional import extension
from .layer import common
from .layer import rnn
from .utils import weight_norm_hook
from . import initializer
# Aggregate the public names declared by each re-exported submodule.
__all__ = []
for _submodule in (norm, extension, common, rnn, weight_norm_hook):
    __all__ += _submodule.__all__
del _submodule
# TODO: define alias in nn directory
from .clip import ClipGradByGlobalNorm #DEFINE_ALIAS
from .clip import ClipGradByNorm #DEFINE_ALIAS
from .clip import ClipGradByValue #DEFINE_ALIAS
# from .control_flow import cond #DEFINE_ALIAS
# from .control_flow import DynamicRNN #DEFINE_ALIAS
# from .control_flow import StaticRNN #DEFINE_ALIAS
# from .control_flow import while_loop #DEFINE_ALIAS
# from .control_flow import rnn #DEFINE_ALIAS
from .decode import BeamSearchDecoder #DEFINE_ALIAS
from .decode import dynamic_decode #DEFINE_ALIAS
# from .decode import Decoder #DEFINE_ALIAS
# from .decode import crf_decoding #DEFINE_ALIAS
# from .decode import ctc_greedy_decoder #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
from .layer.activation import ELU #DEFINE_ALIAS
from .layer.activation import GELU #DEFINE_ALIAS
from .layer.activation import Tanh #DEFINE_ALIAS
from .layer.activation import Hardshrink #DEFINE_ALIAS
from .layer.activation import Hardswish #DEFINE_ALIAS
from .layer.activation import Hardtanh #DEFINE_ALIAS
from .layer.activation import PReLU #DEFINE_ALIAS
from .layer.activation import ReLU #DEFINE_ALIAS
from .layer.activation import ReLU6 #DEFINE_ALIAS
from .layer.activation import SELU #DEFINE_ALIAS
from .layer.activation import Silu #DEFINE_ALIAS
from .layer.activation import LeakyReLU #DEFINE_ALIAS
from .layer.activation import Sigmoid #DEFINE_ALIAS
from .layer.activation import Hardsigmoid #DEFINE_ALIAS
from .layer.activation import LogSigmoid #DEFINE_ALIAS
from .layer.activation import Softmax #DEFINE_ALIAS
from .layer.activation import Softplus #DEFINE_ALIAS
from .layer.activation import Softshrink #DEFINE_ALIAS
from .layer.activation import Softsign #DEFINE_ALIAS
from .layer.activation import Swish #DEFINE_ALIAS
from .layer.activation import Tanhshrink #DEFINE_ALIAS
from .layer.activation import ThresholdedReLU #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.activation import Maxout #DEFINE_ALIAS
from .layer.common import Pad1D #DEFINE_ALIAS
from .layer.common import Pad2D #DEFINE_ALIAS
from .layer.common import Pad3D #DEFINE_ALIAS
from .layer.common import CosineSimilarity #DEFINE_ALIAS
from .layer.common import Embedding #DEFINE_ALIAS
from .layer.common import Linear #DEFINE_ALIAS
from .layer.common import Flatten #DEFINE_ALIAS
from .layer.common import Upsample #DEFINE_ALIAS
from .layer.common import UpsamplingNearest2D #DEFINE_ALIAS
from .layer.common import UpsamplingBilinear2D #DEFINE_ALIAS
from .layer.common import Bilinear #DEFINE_ALIAS
from .layer.common import Dropout #DEFINE_ALIAS
from .layer.common import Dropout2D #DEFINE_ALIAS
from .layer.common import Dropout3D #DEFINE_ALIAS
from .layer.common import AlphaDropout #DEFINE_ALIAS
from .layer.common import Unfold #DEFINE_ALIAS
from .layer.pooling import AvgPool1D #DEFINE_ALIAS
from .layer.pooling import AvgPool2D #DEFINE_ALIAS
from .layer.pooling import AvgPool3D #DEFINE_ALIAS
from .layer.pooling import MaxPool1D #DEFINE_ALIAS
from .layer.pooling import MaxPool2D #DEFINE_ALIAS
from .layer.pooling import MaxPool3D #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool1D #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool2D #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool3D #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool1D #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool2D #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool3D #DEFINE_ALIAS
from .layer.conv import Conv1D #DEFINE_ALIAS
from .layer.conv import Conv2D #DEFINE_ALIAS
from .layer.conv import Conv3D #DEFINE_ALIAS
from .layer.conv import Conv1DTranspose #DEFINE_ALIAS
from .layer.conv import Conv2DTranspose #DEFINE_ALIAS
from .layer.conv import Conv3DTranspose #DEFINE_ALIAS
# from .layer.conv import TreeConv #DEFINE_ALIAS
# from .layer.conv import Conv1D #DEFINE_ALIAS
from .layer.common import Linear
# from .layer.loss import NCELoss #DEFINE_ALIAS
from .layer.loss import BCEWithLogitsLoss #DEFINE_ALIAS
from .layer.loss import CrossEntropyLoss #DEFINE_ALIAS
from .layer.loss import HSigmoidLoss #DEFINE_ALIAS
from .layer.loss import MSELoss #DEFINE_ALIAS
from .layer.loss import L1Loss #DEFINE_ALIAS
from .layer.loss import NLLLoss #DEFINE_ALIAS
from .layer.loss import BCELoss #DEFINE_ALIAS
from .layer.loss import KLDivLoss #DEFINE_ALIAS
from .layer.loss import MarginRankingLoss #DEFINE_ALIAS
from .layer.loss import CTCLoss #DEFINE_ALIAS
from .layer.loss import SmoothL1Loss #DEFINE_ALIAS
from .layer.norm import BatchNorm #DEFINE_ALIAS
from .layer.norm import SyncBatchNorm #DEFINE_ALIAS
from .layer.norm import GroupNorm #DEFINE_ALIAS
from .layer.norm import LayerNorm #DEFINE_ALIAS
from .layer.norm import SpectralNorm #DEFINE_ALIAS
from .layer.norm import InstanceNorm1D #DEFINE_ALIAS
from .layer.norm import InstanceNorm2D #DEFINE_ALIAS
from .layer.norm import InstanceNorm3D #DEFINE_ALIAS
from .layer.norm import BatchNorm1D #DEFINE_ALIAS
from .layer.norm import BatchNorm2D #DEFINE_ALIAS
from .layer.norm import BatchNorm3D #DEFINE_ALIAS
from .layer.norm import LocalResponseNorm #DEFINE_ALIAS
from .layer.rnn import RNNCellBase #DEFINE_ALIAS
from .layer.rnn import SimpleRNNCell #DEFINE_ALIAS
from .layer.rnn import LSTMCell #DEFINE_ALIAS
from .layer.rnn import GRUCell #DEFINE_ALIAS
from .layer.rnn import RNN #DEFINE_ALIAS
from .layer.rnn import BiRNN #DEFINE_ALIAS
from .layer.rnn import SimpleRNN #DEFINE_ALIAS
from .layer.rnn import LSTM #DEFINE_ALIAS
from .layer.rnn import GRU #DEFINE_ALIAS
from .layer.transformer import MultiHeadAttention
from .layer.transformer import TransformerEncoderLayer
from .layer.transformer import TransformerEncoder
from .layer.transformer import TransformerDecoderLayer
from .layer.transformer import TransformerDecoder
from .layer.transformer import Transformer
from .layer.distance import PairwiseDistance #DEFINE_ALIAS
from .layer.vision import PixelShuffle
from .layer.container import LayerDict #DEFINE_ALIAS
from .layer import loss #DEFINE_ALIAS
from .layer import conv #DEFINE_ALIAS
from .layer import vision #DEFINE_ALIAS
from ..fluid.dygraph.layers import Layer #DEFINE_ALIAS
from ..fluid.dygraph.container import LayerList, ParameterList, Sequential #DEFINE_ALIAS
from .clip import ClipGradByGlobalNorm # noqa: F401
from .clip import ClipGradByNorm # noqa: F401
from .clip import ClipGradByValue # noqa: F401
from .decode import BeamSearchDecoder # noqa: F401
from .decode import dynamic_decode # noqa: F401
from .layer.activation import ELU # noqa: F401
from .layer.activation import GELU # noqa: F401
from .layer.activation import Tanh # noqa: F401
from .layer.activation import Hardshrink # noqa: F401
from .layer.activation import Hardswish # noqa: F401
from .layer.activation import Hardtanh # noqa: F401
from .layer.activation import PReLU # noqa: F401
from .layer.activation import ReLU # noqa: F401
from .layer.activation import ReLU6 # noqa: F401
from .layer.activation import SELU # noqa: F401
from .layer.activation import Silu # noqa: F401
from .layer.activation import LeakyReLU # noqa: F401
from .layer.activation import Sigmoid # noqa: F401
from .layer.activation import Hardsigmoid # noqa: F401
from .layer.activation import LogSigmoid # noqa: F401
from .layer.activation import Softmax # noqa: F401
from .layer.activation import Softplus # noqa: F401
from .layer.activation import Softshrink # noqa: F401
from .layer.activation import Softsign # noqa: F401
from .layer.activation import Swish # noqa: F401
from .layer.activation import Tanhshrink # noqa: F401
from .layer.activation import ThresholdedReLU # noqa: F401
from .layer.activation import LogSoftmax # noqa: F401
from .layer.activation import Maxout # noqa: F401
from .layer.common import Pad1D # noqa: F401
from .layer.common import Pad2D # noqa: F401
from .layer.common import Pad3D # noqa: F401
from .layer.common import CosineSimilarity # noqa: F401
from .layer.common import Embedding # noqa: F401
from .layer.common import Linear # noqa: F401
from .layer.common import Flatten # noqa: F401
from .layer.common import Upsample # noqa: F401
from .layer.common import UpsamplingNearest2D # noqa: F401
from .layer.common import UpsamplingBilinear2D # noqa: F401
from .layer.common import Bilinear # noqa: F401
from .layer.common import Dropout # noqa: F401
from .layer.common import Dropout2D # noqa: F401
from .layer.common import Dropout3D # noqa: F401
from .layer.common import AlphaDropout # noqa: F401
from .layer.common import Unfold # noqa: F401
from .layer.pooling import AvgPool1D # noqa: F401
from .layer.pooling import AvgPool2D # noqa: F401
from .layer.pooling import AvgPool3D # noqa: F401
from .layer.pooling import MaxPool1D # noqa: F401
from .layer.pooling import MaxPool2D # noqa: F401
from .layer.pooling import MaxPool3D # noqa: F401
from .layer.pooling import AdaptiveAvgPool1D # noqa: F401
from .layer.pooling import AdaptiveAvgPool2D # noqa: F401
from .layer.pooling import AdaptiveAvgPool3D # noqa: F401
from .layer.pooling import AdaptiveMaxPool1D # noqa: F401
from .layer.pooling import AdaptiveMaxPool2D # noqa: F401
from .layer.pooling import AdaptiveMaxPool3D # noqa: F401
from .layer.conv import Conv1D # noqa: F401
from .layer.conv import Conv2D # noqa: F401
from .layer.conv import Conv3D # noqa: F401
from .layer.conv import Conv1DTranspose # noqa: F401
from .layer.conv import Conv2DTranspose # noqa: F401
from .layer.conv import Conv3DTranspose # noqa: F401
from .layer.loss import BCEWithLogitsLoss # noqa: F401
from .layer.loss import CrossEntropyLoss # noqa: F401
from .layer.loss import HSigmoidLoss # noqa: F401
from .layer.loss import MSELoss # noqa: F401
from .layer.loss import L1Loss # noqa: F401
from .layer.loss import NLLLoss # noqa: F401
from .layer.loss import BCELoss # noqa: F401
from .layer.loss import KLDivLoss # noqa: F401
from .layer.loss import MarginRankingLoss # noqa: F401
from .layer.loss import CTCLoss # noqa: F401
from .layer.loss import SmoothL1Loss # noqa: F401
from .layer.norm import BatchNorm # noqa: F401
from .layer.norm import SyncBatchNorm # noqa: F401
from .layer.norm import GroupNorm # noqa: F401
from .layer.norm import LayerNorm # noqa: F401
from .layer.norm import SpectralNorm # noqa: F401
from .layer.norm import InstanceNorm1D # noqa: F401
from .layer.norm import InstanceNorm2D # noqa: F401
from .layer.norm import InstanceNorm3D # noqa: F401
from .layer.norm import BatchNorm1D # noqa: F401
from .layer.norm import BatchNorm2D # noqa: F401
from .layer.norm import BatchNorm3D # noqa: F401
from .layer.norm import LocalResponseNorm # noqa: F401
from .layer.rnn import RNNCellBase # noqa: F401
from .layer.rnn import SimpleRNNCell # noqa: F401
from .layer.rnn import LSTMCell # noqa: F401
from .layer.rnn import GRUCell # noqa: F401
from .layer.rnn import RNN # noqa: F401
from .layer.rnn import BiRNN # noqa: F401
from .layer.rnn import SimpleRNN # noqa: F401
from .layer.rnn import LSTM # noqa: F401
from .layer.rnn import GRU # noqa: F401
from .layer.transformer import MultiHeadAttention # noqa: F401
from .layer.transformer import TransformerEncoderLayer # noqa: F401
from .layer.transformer import TransformerEncoder # noqa: F401
from .layer.transformer import TransformerDecoderLayer # noqa: F401
from .layer.transformer import TransformerDecoder # noqa: F401
from .layer.transformer import Transformer # noqa: F401
from .layer.distance import PairwiseDistance # noqa: F401
from .layer.vision import PixelShuffle # noqa: F401
from .layer.container import LayerDict # noqa: F401
# TODO: remove loss, keep it for too many used in unitests
from .layer import loss # noqa: F401
from ..fluid.dygraph.layers import Layer # noqa: F401
from ..fluid.dygraph.container import LayerList # noqa: F401
from ..fluid.dygraph.container import ParameterList # noqa: F401
from ..fluid.dygraph.container import Sequential # noqa: F401
from . import utils # noqa: F401
from . import functional # noqa: F401
from . import initializer # noqa: F401
#TODO: remove 'diag_embed', 'remove_weight_norm', 'weight_norm' months later.
import paddle.utils.deprecated as deprecated
@deprecated(
    since="2.0.0",
    # Fixed typo: was "paddle.nn.funcitional.diag_embed"; this string is
    # shown verbatim to users in the deprecation warning.
    update_to="paddle.nn.functional.diag_embed",
    reason="diag_embed in paddle.nn will removed in future")
def diag_embed(*args):
    '''
    Deprecated alias of paddle.nn.functional.diag_embed.

    Forwards all positional arguments to the functional implementation
    unchanged and returns its result.
    '''
    return functional.diag_embed(*args)
@deprecated(
    since="2.0.0",
    update_to="paddle.nn.utils.remove_weight_norm",
    reason="remove_weight_norm in paddle.nn will removed in future")
def remove_weight_norm(*args):
    """Deprecated alias of ``paddle.nn.utils.remove_weight_norm``.

    Every positional argument is forwarded unchanged to the utils
    implementation, whose result is returned as-is.
    """
    result = utils.remove_weight_norm(*args)
    return result
@deprecated(
    since="2.0.0",
    update_to="paddle.nn.utils.weight_norm",
    reason="weight_norm in paddle.nn will removed in future")
def weight_norm(*args):
    """Deprecated alias of ``paddle.nn.utils.weight_norm``.

    Every positional argument is forwarded unchanged to the utils
    implementation, whose result is returned as-is.
    """
    result = utils.weight_norm(*args)
    return result
__all__ = [ #noqa
    'BatchNorm',
    'GroupNorm',
    'LayerNorm',
    'SpectralNorm',
    'BatchNorm1D',
    'BatchNorm2D',
    'BatchNorm3D',
    'InstanceNorm1D',
    'InstanceNorm2D',
    'InstanceNorm3D',
    'SyncBatchNorm',
    'LocalResponseNorm',
    'Embedding',
    'Linear',
    'Upsample',
    'UpsamplingNearest2D',
    'UpsamplingBilinear2D',
    'Pad1D',
    'Pad2D',
    'Pad3D',
    'CosineSimilarity',
    'Dropout',
    'Dropout2D',
    'Dropout3D',
    'Bilinear',
    'AlphaDropout',
    # Fixed: missing comma implicitly concatenated 'Unfold' and
    # 'RNNCellBase' into one bogus entry 'UnfoldRNNCellBase', dropping both
    # real names from the public API.
    'Unfold',
    'RNNCellBase',
    'SimpleRNNCell',
    'LSTMCell',
    'GRUCell',
    'RNN',
    'BiRNN',
    'SimpleRNN',
    'LSTM',
    'GRU',
    'dynamic_decode',
    'MultiHeadAttention',
    'Maxout',
    'Softsign',
    'Transformer',
    'MSELoss',
    'LogSigmoid',
    'BeamSearchDecoder',
    'ClipGradByNorm',
    'ReLU',
    'PairwiseDistance',
    'BCEWithLogitsLoss',
    'SmoothL1Loss',
    'MaxPool3D',
    'AdaptiveMaxPool2D',
    'Hardshrink',
    'clip',  # NOTE(review): not imported in this module above — verify
    'Softplus',
    'KLDivLoss',
    'clip_by_norm',  # NOTE(review): not imported in this module above — verify
    'AvgPool2D',
    'L1Loss',
    'LeakyReLU',
    'AvgPool1D',
    'AdaptiveAvgPool3D',
    'AdaptiveMaxPool3D',
    'NLLLoss',
    'Conv1D',
    'Sequential',
    'Hardswish',
    'Conv1DTranspose',
    'AdaptiveMaxPool1D',
    'TransformerEncoder',
    'Softmax',
    'ParameterList',
    'Conv2D',
    'Softshrink',
    'Hardtanh',
    'TransformerDecoderLayer',
    'CrossEntropyLoss',
    'GELU',
    'SELU',
    'Silu',
    'Conv2DTranspose',
    'CTCLoss',
    'ThresholdedReLU',
    'AdaptiveAvgPool2D',
    'MaxPool1D',
    'Layer',
    'TransformerDecoder',
    'Conv3D',
    'Tanh',
    'Conv3DTranspose',
    'Flatten',
    'AdaptiveAvgPool1D',
    'Tanhshrink',
    'HSigmoidLoss',
    'PReLU',
    'TransformerEncoderLayer',
    'AvgPool3D',
    'MaxPool2D',
    'MarginRankingLoss',
    'LayerList',
    'ClipGradByValue',
    'BCELoss',
    'Hardsigmoid',
    'ClipGradByGlobalNorm',
    'LogSoftmax',
    'Sigmoid',
    'Swish',
    'PixelShuffle',
    'ELU',
    'ReLU6'
]
......@@ -13,8 +13,6 @@
# limitations under the License.
# TODO: define the functions to clip gradient of parameter
from ..fluid.clip import ClipGradByGlobalNorm #DEFINE_ALIAS
from ..fluid.clip import ClipGradByNorm #DEFINE_ALIAS
from ..fluid.clip import ClipGradByValue #DEFINE_ALIAS
# Public gradient-clipping API re-exported from paddle.fluid.clip.
__all__ = [
    'ClipGradByGlobalNorm',
    'ClipGradByNorm',
    'ClipGradByValue',
]
from ..fluid.clip import ClipGradByGlobalNorm # noqa: F401
from ..fluid.clip import ClipGradByNorm # noqa: F401
from ..fluid.clip import ClipGradByValue # noqa: F401
......@@ -12,10 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from ..fluid.layers import BeamSearchDecoder #DEFINE_ALIAS
from ..fluid.layers import dynamic_decode #DEFINE_ALIAS
# Public decoding API re-exported from paddle.fluid.layers.
__all__ = ['BeamSearchDecoder', 'dynamic_decode']
from ..fluid.layers import BeamSearchDecoder # noqa: F401
from ..fluid.layers import dynamic_decode # noqa: F401
......@@ -14,211 +14,185 @@
# TODO: import all neural network related api under this directory,
# including layers, linear, conv, rnn etc.
# TODO: define alias in functional directory
from . import conv
from . import activation
from . import extension
from . import common
from . import pooling
from . import loss

# Re-export every public name the submodules declare, preserving the
# submodule order above.
__all__ = []
for _submodule in (conv, activation, extension, common, pooling, loss):
    __all__ += _submodule.__all__
del _submodule
from .activation import elu #DEFINE_ALIAS
from .activation import elu_ #DEFINE_ALIAS
# from .activation import erf #DEFINE_ALIAS
from .activation import gelu #DEFINE_ALIAS
from .activation import hardshrink #DEFINE_ALIAS
from .activation import hardtanh #DEFINE_ALIAS
from .activation import hardsigmoid #DEFINE_ALIAS
from .activation import hardswish #DEFINE_ALIAS
from .activation import leaky_relu #DEFINE_ALIAS
from .activation import log_sigmoid #DEFINE_ALIAS
from .activation import maxout #DEFINE_ALIAS
from .activation import prelu #DEFINE_ALIAS
from .activation import relu #DEFINE_ALIAS
from .activation import relu_ #DEFINE_ALIAS
from .activation import relu6 #DEFINE_ALIAS
from .activation import selu #DEFINE_ALIAS
from .activation import sigmoid #DEFINE_ALIAS
from .activation import silu #DEFINE_ALIAS
# from .activation import soft_relu #DEFINE_ALIAS
from .activation import softmax #DEFINE_ALIAS
from .activation import softmax_ #DEFINE_ALIAS
from .activation import softplus #DEFINE_ALIAS
from .activation import softshrink #DEFINE_ALIAS
from .activation import softsign #DEFINE_ALIAS
from .activation import swish #DEFINE_ALIAS
from .activation import tanh #DEFINE_ALIAS
from .activation import tanh_ #DEFINE_ALIAS
from .activation import tanhshrink #DEFINE_ALIAS
from .activation import thresholded_relu #DEFINE_ALIAS
from .activation import log_softmax #DEFINE_ALIAS
from .activation import glu #DEFINE_ALIAS
from .common import dropout #DEFINE_ALIAS
from .common import dropout2d #DEFINE_ALIAS
from .common import dropout3d #DEFINE_ALIAS
from .common import alpha_dropout #DEFINE_ALIAS
# from .common import embedding #DEFINE_ALIAS
# from .common import fc #DEFINE_ALIAS
from .common import label_smooth
# from .common import one_hot #DEFINE_ALIAS
from .common import pad #DEFINE_ALIAS
# from .common import pad_constant_like #DEFINE_ALIAS
# from .common import pad2d #DEFINE_ALIAS
from .common import cosine_similarity #DEFINE_ALIAS
from .common import unfold #DEFINE_ALIAS
# from .common import bilinear_tensor_product #DEFINE_ALIAS
from .common import interpolate #DEFINE_ALIAS
from .common import upsample #DEFINE_ALIAS
from .common import bilinear #DEFINE_ALIAS
from .conv import conv1d #DEFINE_ALIAS
from .conv import conv1d_transpose #DEFINE_ALIAS
from .common import linear #DEFINE_ALIAS
from .conv import conv2d #DEFINE_ALIAS
from .conv import conv2d_transpose #DEFINE_ALIAS
from .conv import conv3d #DEFINE_ALIAS
from .conv import conv3d_transpose #DEFINE_ALIAS
# from .extension import add_position_encoding #DEFINE_ALIAS
# from .extension import autoincreased_step_counter #DEFINE_ALIAS
# from .extension import continuous_value_model #DEFINE_ALIAS
# from .extension import filter_by_instag #DEFINE_ALIAS
# from .extension import linear_chain_crf #DEFINE_ALIAS
# from .extension import merge_selected_rows #DEFINE_ALIAS
# from .extension import multiclass_nms #DEFINE_ALIAS
# from .extension import polygon_box_transform #DEFINE_ALIAS
# from .extension import random_crop #DEFINE_ALIAS
# from .extension import rpn_target_assign #DEFINE_ALIAS
# from .extension import similarity_focus #DEFINE_ALIAS
# from .extension import target_assign #DEFINE_ALIAS
# from .extension import temporal_shift #DEFINE_ALIAS
# from .extension import warpctc #DEFINE_ALIAS
from .extension import diag_embed #DEFINE_ALIAS
from .activation import elu # noqa: F401
from .activation import elu_ # noqa: F401
from .activation import gelu # noqa: F401
from .activation import hardshrink # noqa: F401
from .activation import hardtanh # noqa: F401
from .activation import hardsigmoid # noqa: F401
from .activation import hardswish # noqa: F401
from .activation import leaky_relu # noqa: F401
from .activation import log_sigmoid # noqa: F401
from .activation import maxout # noqa: F401
from .activation import prelu # noqa: F401
from .activation import relu # noqa: F401
from .activation import relu_ # noqa: F401
from .activation import relu6 # noqa: F401
from .activation import selu # noqa: F401
from .activation import sigmoid # noqa: F401
from .activation import silu # noqa: F401
from .activation import softmax # noqa: F401
from .activation import softmax_ # noqa: F401
from .activation import softplus # noqa: F401
from .activation import softshrink # noqa: F401
from .activation import softsign # noqa: F401
from .activation import swish # noqa: F401
from .activation import tanh # noqa: F401
from .activation import tanh_ # noqa: F401
from .activation import tanhshrink # noqa: F401
from .activation import thresholded_relu # noqa: F401
from .activation import log_softmax # noqa: F401
from .activation import glu # noqa: F401
from .common import dropout # noqa: F401
from .common import dropout2d # noqa: F401
from .common import dropout3d # noqa: F401
from .common import alpha_dropout # noqa: F401
from .common import label_smooth # noqa: F401
from .common import pad # noqa: F401
from .common import cosine_similarity # noqa: F401
from .common import unfold # noqa: F401
from .common import interpolate # noqa: F401
from .common import upsample # noqa: F401
from .common import bilinear # noqa: F401
from .conv import conv1d # noqa: F401
from .conv import conv1d_transpose # noqa: F401
from .common import linear # noqa: F401
from .conv import conv2d # noqa: F401
from .conv import conv2d_transpose # noqa: F401
from .conv import conv3d # noqa: F401
from .conv import conv3d_transpose # noqa: F401
from .extension import diag_embed # noqa: F401
from .extension import sequence_mask
# from .lod import sequence_concat #DEFINE_ALIAS
# from .lod import sequence_conv #DEFINE_ALIAS
# from .lod import sequence_enumerate #DEFINE_ALIAS
# from .lod import sequence_expand_as #DEFINE_ALIAS
# from .lod import sequence_expand #DEFINE_ALIAS
# from .lod import sequence_first_step #DEFINE_ALIAS
# from .lod import sequence_last_step #DEFINE_ALIAS
# from .lod import sequence_mask #DEFINE_ALIAS
# from .lod import sequence_pad #DEFINE_ALIAS
# from .lod import sequence_pool #DEFINE_ALIAS
# from .lod import sequence_reshape #DEFINE_ALIAS
# from .lod import sequence_reverse #DEFINE_ALIAS
# from .lod import sequence_scatter #DEFINE_ALIAS
# from .lod import sequence_slice #DEFINE_ALIAS
# from .lod import sequence_softmax #DEFINE_ALIAS
# from .lod import sequence_unpad #DEFINE_ALIAS
# from .lod import array_length #DEFINE_ALIAS
# from .lod import array_read #DEFINE_ALIAS
# from .lod import array_write #DEFINE_ALIAS
# from .lod import create_array #DEFINE_ALIAS
# from .lod import hash #DEFINE_ALIAS
# from .lod import im2sequence #DEFINE_ALIAS
# from .lod import lod_append #DEFINE_ALIAS
# from .lod import lod_reset #DEFINE_ALIAS
# from .lod import reorder_lod_tensor_by_rank #DEFINE_ALIAS
# from .lod import tensor_array_to_tensor #DEFINE_ALIAS
# from .lod import dynamic_gru #DEFINE_ALIAS
# from .lod import dynamic_lstm #DEFINE_ALIAS
# from .lod import dynamic_lstmp #DEFINE_ALIAS
from .loss import binary_cross_entropy #DEFINE_ALIAS
from .loss import binary_cross_entropy_with_logits #DEFINE_ALIAS
# from .loss import bpr_loss #DEFINE_ALIAS
# from .loss import center_loss #DEFINE_ALIAS
#from .loss import cross_entropy #DEFINE_ALIAS
from .loss import cross_entropy #DEFINE_ALIAS
from .loss import dice_loss #DEFINE_ALIAS
from .loss import hsigmoid_loss #DEFINE_ALIAS
from .loss import kl_div #DEFINE_ALIAS
from .loss import l1_loss #DEFINE_ALIAS
from .loss import log_loss #DEFINE_ALIAS
from .loss import margin_ranking_loss #DEFINE_ALIAS
from .loss import mse_loss #DEFINE_ALIAS
from .loss import nll_loss #DEFINE_ALIAS
# from .loss import nce #DEFINE_ALIAS
from .loss import npair_loss #DEFINE_ALIAS
from .loss import sigmoid_focal_loss #DEFINE_ALIAS
# from .loss import smooth_l1 #DEFINE_ALIAS
from .loss import smooth_l1_loss #DEFINE_ALIAS
from .loss import softmax_with_cross_entropy #DEFINE_ALIAS
from .loss import square_error_cost #DEFINE_ALIAS
# from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS
from .loss import ctc_loss #DEFINE_ALIAS
# from .norm import data_norm #DEFINE_ALIAS
# from .norm import group_norm #DEFINE_ALIAS
from .norm import batch_norm #DEFINE_ALIAS
from .norm import instance_norm #DEFINE_ALIAS
from .norm import layer_norm #DEFINE_ALIAS
from .norm import local_response_norm #DEFINE_ALIAS
from .norm import normalize #DEFINE_ALIAS
# from .norm import spectral_norm #DEFINE_ALIAS
# from .pooling import pool2d #DEFINE_ALIAS
# from .pooling import pool3d #DEFINE_ALIAS
from .pooling import avg_pool1d #DEFINE_ALIAS
from .pooling import avg_pool2d #DEFINE_ALIAS
from .pooling import avg_pool3d #DEFINE_ALIAS
from .pooling import max_pool1d #DEFINE_ALIAS
from .pooling import max_pool2d #DEFINE_ALIAS
from .pooling import max_pool3d #DEFINE_ALIAS
from .loss import binary_cross_entropy # noqa: F401
from .loss import binary_cross_entropy_with_logits # noqa: F401
from .loss import cross_entropy # noqa: F401
from .loss import dice_loss # noqa: F401
from .loss import hsigmoid_loss # noqa: F401
from .loss import kl_div # noqa: F401
from .loss import l1_loss # noqa: F401
from .loss import log_loss # noqa: F401
from .loss import margin_ranking_loss # noqa: F401
from .loss import mse_loss # noqa: F401
from .loss import nll_loss # noqa: F401
from .loss import npair_loss # noqa: F401
from .loss import sigmoid_focal_loss # noqa: F401
from .loss import smooth_l1_loss # noqa: F401
from .loss import softmax_with_cross_entropy # noqa: F401
from .loss import square_error_cost # noqa: F401
from .loss import ctc_loss # noqa: F401
from .norm import batch_norm # noqa: F401
from .norm import instance_norm # noqa: F401
from .norm import layer_norm # noqa: F401
from .norm import local_response_norm # noqa: F401
from .norm import normalize # noqa: F401
from .pooling import avg_pool1d # noqa: F401
from .pooling import avg_pool2d # noqa: F401
from .pooling import avg_pool3d # noqa: F401
from .pooling import max_pool1d # noqa: F401
from .pooling import max_pool2d # noqa: F401
from .pooling import max_pool3d # noqa: F401
from .pooling import adaptive_max_pool1d #DEFINE_ALIAS
from .pooling import adaptive_max_pool2d #DEFINE_ALIAS
from .pooling import adaptive_max_pool3d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool1d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool2d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool3d #DEFINE_ALIAS
from .pooling import adaptive_max_pool1d # noqa: F401
from .pooling import adaptive_max_pool2d # noqa: F401
from .pooling import adaptive_max_pool3d # noqa: F401
from .pooling import adaptive_avg_pool1d # noqa: F401
from .pooling import adaptive_avg_pool2d # noqa: F401
from .pooling import adaptive_avg_pool3d # noqa: F401
# from .rnn import rnn #DEFINE_ALIAS
# from .rnn import birnn #DEFINE_ALIAS
# from .rnn import gru_unit #DEFINE_ALIAS
# from .rnn import lstm #DEFINE_ALIAS
# from .rnn import lstm_unit #DEFINE_ALIAS
# from .vision import affine_channel #DEFINE_ALIAS
from .vision import affine_grid #DEFINE_ALIAS
# from .vision import anchor_generator #DEFINE_ALIAS
# from .vision import bipartite_match #DEFINE_ALIAS
# from .vision import box_clip #DEFINE_ALIAS
# from .vision import box_coder #DEFINE_ALIAS
# from .vision import box_decoder_and_assign #DEFINE_ALIAS
# from .vision import collect_fpn_proposals #DEFINE_ALIAS
# from .vision import deformable_conv #DEFINE_ALIAS
# from .vision import deformable_roi_pooling #DEFINE_ALIAS
# from .vision import density_prior_box #DEFINE_ALIAS
# from .vision import detection_output #DEFINE_ALIAS
# from .vision import distribute_fpn_proposals #DEFINE_ALIAS
# from .vision import fsp_matrix #DEFINE_ALIAS
# from .vision import generate_mask_labels #DEFINE_ALIAS
# from .vision import generate_proposal_labels #DEFINE_ALIAS
# from .vision import generate_proposals #DEFINE_ALIAS
from .vision import grid_sample #DEFINE_ALIAS
# from .vision import image_resize #DEFINE_ALIAS
# from .vision import image_resize_short #DEFINE_ALIAS
# from .vision import multi_box_head #DEFINE_ALIAS
from .vision import pixel_shuffle #DEFINE_ALIAS
# from .vision import prior_box #DEFINE_ALIAS
# from .vision import prroi_pool #DEFINE_ALIAS
# from .vision import psroi_pool #DEFINE_ALIAS
# from .vision import resize_bilinear #DEFINE_ALIAS
# from .vision import resize_nearest #DEFINE_ALIAS
# from .vision import resize_trilinear #DEFINE_ALIAS
# from .vision import retinanet_detection_output #DEFINE_ALIAS
# from .vision import retinanet_target_assign #DEFINE_ALIAS
# from .vision import roi_align #DEFINE_ALIAS
# from .vision import roi_perspective_transform #DEFINE_ALIAS
# from .vision import roi_pool #DEFINE_ALIAS
# from .vision import shuffle_channel #DEFINE_ALIAS
# from .vision import space_to_depth #DEFINE_ALIAS
# from .vision import yolo_box #DEFINE_ALIAS
# from .vision import yolov3_loss #DEFINE_ALIAS
from .input import one_hot #DEFINE_ALIAS
from .input import embedding #DEFINE_ALIAS
from ...fluid.layers import gather_tree
from ...fluid.layers import temporal_shift
from .vision import affine_grid # noqa: F401
from .vision import grid_sample # noqa: F401
from .vision import pixel_shuffle # noqa: F401
from .input import one_hot # noqa: F401
from .input import embedding # noqa: F401
from ...fluid.layers import gather_tree # noqa: F401
from ...fluid.layers import temporal_shift # noqa: F401
# Public functional API, grouped by the submodule each name comes from.
# Order is preserved from the import section above.
__all__ = [ #noqa
    # conv
    'conv1d', 'conv1d_transpose', 'conv2d', 'conv2d_transpose', 'conv3d',
    'conv3d_transpose',
    # activation
    'elu', 'elu_', 'gelu', 'hardshrink', 'hardtanh', 'hardsigmoid',
    'hardswish', 'leaky_relu', 'log_sigmoid', 'maxout', 'prelu', 'relu',
    'relu_', 'relu6', 'selu', 'softmax', 'softmax_', 'softplus',
    'softshrink', 'softsign', 'sigmoid', 'silu', 'swish', 'tanh', 'tanh_',
    'tanhshrink', 'thresholded_relu', 'log_softmax', 'glu',
    # extension
    'diag_embed', 'sequence_mask',
    # common
    'dropout', 'dropout2d', 'dropout3d', 'alpha_dropout', 'label_smooth',
    'linear', 'pad', 'unfold', 'interpolate', 'upsample', 'bilinear',
    'cosine_similarity',
    # pooling
    'avg_pool1d', 'avg_pool2d', 'avg_pool3d', 'max_pool1d', 'max_pool2d',
    'max_pool3d', 'adaptive_avg_pool1d', 'adaptive_avg_pool2d',
    'adaptive_avg_pool3d', 'adaptive_max_pool1d', 'adaptive_max_pool2d',
    'adaptive_max_pool3d',
    # loss
    'binary_cross_entropy', 'binary_cross_entropy_with_logits',
    'cross_entropy', 'dice_loss', 'hsigmoid_loss', 'kl_div', 'l1_loss',
    'log_loss', 'mse_loss', 'margin_ranking_loss', 'nll_loss',
    'npair_loss', 'sigmoid_focal_loss', 'smooth_l1_loss',
    'softmax_with_cross_entropy', 'square_error_cost', 'ctc_loss',
    # vision / input / norm / misc
    'affine_grid', 'grid_sample', 'local_response_norm', 'pixel_shuffle',
    'embedding', 'gather_tree', 'one_hot', 'normalize',
]
......@@ -12,53 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define activation functions of neural network
from ...fluid.layers import brelu #DEFINE_ALIAS
# from ...fluid.layers import erf #DEFINE_ALIAS
from ...fluid.layers import maxout #DEFINE_ALIAS
# from ...fluid.layers import soft_relu #DEFINE_ALIAS
from ...fluid.layers import swish #DEFINE_ALIAS
from ...fluid.layers import sigmoid #DEFINE_ALIAS
from ...tensor.math import tanh #DEFINE_ALIAS
from ...tensor.math import tanh_ #DEFINE_ALIAS
from ...fluid.layers import sigmoid # noqa: F401
from ...tensor.math import tanh # noqa: F401
from ...tensor.math import tanh_ # noqa: F401
from ...tensor.manipulation import _print_warning_in_static_mode
from ...tensor.manipulation import chunk
from ...tensor.math import multiply
__all__ = [
'brelu',
'elu',
'elu_',
'gelu',
'hardshrink',
'hardtanh',
'hardsigmoid',
'hardswish',
'leaky_relu',
'log_sigmoid',
'maxout',
'prelu',
'relu',
'relu_',
'relu6',
'selu',
'softmax',
'softmax_',
'softplus',
'softshrink',
'softsign',
'sigmoid',
'silu'
'swish',
'tanh',
'tanh_',
'tanhshrink',
'thresholded_relu',
'log_softmax',
'glu',
]
import warnings
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import in_dygraph_mode, convert_np_dtype_to_dtype_
......
......@@ -20,44 +20,20 @@ from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat
from ...fluid.layers import core
from ...fluid import dygraph_utils
# TODO: define the common functions to build a neural network
# from ...fluid import one_hot #DEFINE_ALIAS
# from ...fluid.layers import pad2d #DEFINE_ALIAS
from ...fluid.layers import unfold #DEFINE_ALIAS
from ...fluid.layers import squeeze #DEFINE_ALIAS
from ...fluid.layers import unsqueeze #DEFINE_ALIAS
from ...fluid.layers import unfold # noqa: F401
from ...fluid.layers import squeeze
from ...fluid.layers import unsqueeze
from ...tensor import clip
from ...tensor import sum
from ...tensor import sqrt
from ...tensor import sum #DEFINE_ALIAS
from ...tensor import sqrt #DEFINE_ALIAS
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...fluid.framework import Variable, in_dygraph_mode, _varbase_creator
#from ...fluid.layers import fc #DEFINE_ALIAS
# from ...fluid.layers import pad_constant_like #DEFINE_ALIAS
from ...fluid.framework import in_dygraph_mode
from ...fluid import core, dygraph_utils
from ...fluid import core, layers
from ...fluid.data_feeder import check_variable_and_dtype
__all__ = [
'dropout',
'dropout2d',
'dropout3d',
'alpha_dropout',
# 'embedding',
# 'fc',
'label_smooth',
'linear',
'pad',
'unfold',
# 'bilinear_tensor_product',
'interpolate',
'upsample',
'bilinear',
'cosine_similarity',
]
def interpolate(x,
size=None,
......
......@@ -13,15 +13,6 @@
# limitations under the License.
from __future__ import print_function
__all__ = [
'conv1d',
'conv1d_transpose',
'conv2d',
'conv2d_transpose',
'conv3d',
'conv3d_transpose',
]
import numpy as np
from ...device import get_cudnn_version
from ...fluid.framework import Variable, in_dygraph_mode
......
......@@ -14,8 +14,6 @@
# TODO: define the extention functions
__all__ = ['diag_embed', 'sequence_mask']
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
......
......@@ -19,8 +19,6 @@ from ...fluid.layer_helper import LayerHelper
from ...fluid.layers import core
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
__all__ = ['one_hot', 'embedding']
def one_hot(x, num_classes, name=None):
"""
......
......@@ -24,14 +24,14 @@ import paddle
import paddle.fluid as fluid
from ...fluid.framework import core, in_dygraph_mode
from ...fluid.layers.nn import _elementwise_op_in_dygraph
from ...fluid.layers import dice_loss #DEFINE_ALIAS
from ...fluid.layers import log_loss #DEFINE_ALIAS
from ...fluid.layers import npair_loss #DEFINE_ALIAS
from ...fluid.layers import dice_loss # noqa: F401
from ...fluid.layers import log_loss # noqa: F401
from ...fluid.layers import npair_loss # noqa: F401
from ...fluid.layers import reshape
from ...fluid.layers import softmax_with_cross_entropy as fluid_softmax_with_cross_entropy #DEFINE_ALIAS
from ...fluid.layers import square_error_cost #DEFINE_ALIAS
from ...fluid.layers import softmax_with_cross_entropy as fluid_softmax_with_cross_entropy
from ...fluid.layers import square_error_cost # noqa: F401
from ...fluid.layers import edit_distance #DEFINE_ALIAS
from ...fluid.layers import edit_distance # noqa: F401
from ...fluid.layers import huber_loss
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import in_dygraph_mode
......@@ -39,27 +39,6 @@ from ...fluid.framework import _varbase_creator
from ...fluid.framework import Variable
from paddle.utils import deprecated
__all__ = [
'binary_cross_entropy',
'binary_cross_entropy_with_logits',
'cross_entropy',
'dice_loss',
'hsigmoid_loss',
'kl_div',
'l1_loss',
'log_loss',
'mse_loss',
'margin_ranking_loss',
# 'nce',
'nll_loss',
'npair_loss',
'sigmoid_focal_loss',
'smooth_l1_loss',
'softmax_with_cross_entropy',
'square_error_cost',
'ctc_loss',
]
def binary_cross_entropy(input, label, weight=None, reduction='mean',
name=None):
......@@ -1312,7 +1291,7 @@ def cross_entropy(input,
Indicate whether compute softmax before cross_entropy.
Default is ``True``.
- **name** (str, optional)
- **name** (str, optional)
The name of the operator. Default is ``None`` .
For more information, please refer to :ref:`api_guide_Name` .
......
......@@ -22,19 +22,8 @@ from ...framework import create_parameter
from ...fluid.initializer import Constant
from ...fluid.param_attr import ParamAttr
from ...fluid import core, dygraph_utils
import numbers
__all__ = [
'batch_norm',
# 'data_norm',
'instance_norm',
'layer_norm',
'local_response_norm',
'normalize',
# 'spectral_norm'
]
def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
r"""
......
......@@ -18,21 +18,6 @@ from ...fluid.framework import in_dygraph_mode
from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype
__all__ = [
'avg_pool1d',
'avg_pool2d',
'avg_pool3d',
'max_pool1d',
'max_pool2d',
'max_pool3d',
'adaptive_avg_pool1d',
'adaptive_avg_pool2d',
'adaptive_avg_pool3d',
'adaptive_max_pool1d',
'adaptive_max_pool2d',
'adaptive_max_pool3d',
]
def _is_list_or_tuple(input):
return isinstance(input, (list, tuple))
......
......@@ -19,43 +19,6 @@ from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid import dygraph_utils
import numpy as np
# TODO: define specitial functions used in computer vision task
# from ...fluid.layers import affine_channel #DEFINE_ALIAS
# from ...fluid.layers import anchor_generator #DEFINE_ALIAS
# from ...fluid.layers import bipartite_match #DEFINE_ALIAS
# from ...fluid.layers import box_clip #DEFINE_ALIAS
# from ...fluid.layers import box_coder #DEFINE_ALIAS
# from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS
# from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS
# from ...fluid.layers import deformable_roi_pooling #DEFINE_ALIAS
# from ...fluid.layers import density_prior_box #DEFINE_ALIAS
# from ...fluid.layers import detection_output #DEFINE_ALIAS
# from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS
# from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS
# from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS
# from ...fluid.layers import generate_proposals #DEFINE_ALIAS
# from ...fluid.layers import image_resize #DEFINE_ALIAS
# from ...fluid.layers import prior_box #DEFINE_ALIAS
# from ...fluid.layers import prroi_pool #DEFINE_ALIAS
# from ...fluid.layers import psroi_pool #DEFINE_ALIAS
# from ...fluid.layers import resize_bilinear #DEFINE_ALIAS
# from ...fluid.layers import resize_nearest #DEFINE_ALIAS
# from ...fluid.layers import resize_trilinear #DEFINE_ALIAS
# from ...fluid.layers import roi_align #DEFINE_ALIAS
# from ...fluid.layers import roi_pool #DEFINE_ALIAS
# from ...fluid.layers import space_to_depth #DEFINE_ALIAS
# from ...fluid.layers import yolo_box #DEFINE_ALIAS
# from ...fluid.layers import yolov3_loss #DEFINE_ALIAS
# from ...fluid.layers import fsp_matrix #DEFINE_ALIAS
# from ...fluid.layers import image_resize_short #DEFINE_ALIAS
# from ...fluid.layers import pixel_shuffle #DEFINE_ALIAS
# from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS
# from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS
# from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS
# from ...fluid.layers import shuffle_channel #DEFINE_ALIAS
__all__ = ['affine_grid', 'grid_sample', 'pixel_shuffle']
def affine_grid(theta, out_shape, align_corners=True, name=None):
"""
......
......@@ -13,36 +13,34 @@
# limitations under the License.
# TODO: define the initializers to create a Parameter in neural network
from ...fluid.initializer import Bilinear #DEFINE_ALIAS
from ...fluid.initializer import set_global_initializer #DEFINE_ALIAS
from ...fluid.initializer import Bilinear # noqa: F401
from ...fluid.initializer import set_global_initializer # noqa: F401
from . import constant
from .constant import Constant #DEFINE_ALIAS
from .constant import Constant # noqa: F401
from . import kaiming
from .kaiming import KaimingNormal #DEFINE_ALIAS
from .kaiming import KaimingUniform #DEFINE_ALIAS
from .kaiming import KaimingNormal # noqa: F401
from .kaiming import KaimingUniform # noqa: F401
__all__ = ['Bilinear', 'set_global_initializer']
from .xavier import XavierNormal # noqa: F401
from .xavier import XavierUniform # noqa: F401
__all__ += constant.__all__
__all__ += kaiming.__all__
from .assign import Assign # noqa: F401
from . import xavier
from .xavier import XavierNormal #DEFINE_ALIAS
from .xavier import XavierUniform #DEFINE_ALIAS
from .normal import Normal # noqa: F401
from .normal import TruncatedNormal # noqa: F401
from . import assign
from .assign import Assign #DEFINE_ALIAS
from .uniform import Uniform # noqa: F401
from . import normal
from .normal import Normal #DEFINE_ALIAS
from .normal import TruncatedNormal #DEFINE_ALIAS
from . import uniform
from .uniform import Uniform #DEFINE_ALIAS
__all__ += xavier.__all__
__all__ += assign.__all__
__all__ += normal.__all__
__all__ += uniform.__all__
__all__ = [ #noqa
'Bilinear',
'Constant',
'KaimingUniform',
'KaimingNormal',
'XavierNormal',
'XavierUniform',
'Assign',
'Normal',
'TruncatedNormal',
'Uniform',
'set_global_initializer'
]
......@@ -19,8 +19,6 @@ from ...fluid.core import VarDesc
from ...fluid.data_feeder import check_type
from ...fluid.initializer import NumpyArrayInitializer
__all__ = ['Assign']
class Assign(NumpyArrayInitializer):
"""Init an parameter with a numpy array, list, or tensor.
......
......@@ -15,8 +15,6 @@
# TODO: define the initializers of Constant in neural network
from ...fluid.initializer import ConstantInitializer
__all__ = ['Constant']
class Constant(ConstantInitializer):
"""Implement the constant initializer.
......
......@@ -15,8 +15,6 @@
# TODO: define the initializers of Kaiming functions in neural network
from ...fluid.initializer import MSRAInitializer
__all__ = ['KaimingUniform', 'KaimingNormal']
class KaimingNormal(MSRAInitializer):
r"""Implements the Kaiming Normal initializer
......
......@@ -15,8 +15,6 @@
from ...fluid.initializer import NormalInitializer
from ...fluid.initializer import TruncatedNormalInitializer
__all__ = ['Normal', 'TruncatedNormal']
class Normal(NormalInitializer):
"""The Random Normal (Gaussian) distribution initializer.
......
......@@ -14,8 +14,6 @@
from ...fluid.initializer import UniformInitializer
__all__ = ['Uniform']
class Uniform(UniformInitializer):
"""The random uniform distribution initializer.
......
......@@ -14,8 +14,6 @@
from ...fluid.initializer import XavierInitializer
__all__ = ['XavierNormal', 'XavierUniform']
class XavierNormal(XavierInitializer):
r"""
......
......@@ -14,90 +14,70 @@
# TODO: define activation functions of neural network
from . import activation
from . import loss
from . import conv
from . import activation
from . import norm
from . import rnn
from . import vision
from . import distance
from . import transformer
from . import container
from . import rnn # noqa: F401
from . import transformer # noqa: F401
from . import container # noqa: F401
from .activation import *
from .loss import *
from .conv import *
from .activation import *
from .norm import *
from .rnn import *
from .vision import *
from .activation import PReLU # noqa: F401
from .activation import ReLU # noqa: F401
from .activation import ReLU6 # noqa: F401
from .activation import LeakyReLU # noqa: F401
from .activation import Sigmoid # noqa: F401
from .activation import Softmax # noqa: F401
from .activation import LogSoftmax # noqa: F401
from .common import Bilinear # noqa: F401
from .common import Pad1D # noqa: F401
from .common import Pad2D # noqa: F401
from .common import Pad3D # noqa: F401
from .common import CosineSimilarity # noqa: F401
from .common import Embedding # noqa: F401
from .common import Linear # noqa: F401
from .common import Flatten # noqa: F401
from .common import Upsample # noqa: F401
from .common import Dropout # noqa: F401
from .common import Dropout2D # noqa: F401
from .common import Dropout3D # noqa: F401
from .common import AlphaDropout # noqa: F401
from .common import Upsample # noqa: F401
from .common import UpsamplingBilinear2D # noqa: F401
from .common import UpsamplingNearest2D # noqa: F401
from .pooling import AvgPool1D # noqa: F401
from .pooling import AvgPool2D # noqa: F401
from .pooling import AvgPool3D # noqa: F401
from .pooling import MaxPool1D # noqa: F401
from .pooling import MaxPool2D # noqa: F401
from .pooling import MaxPool3D # noqa: F401
from .pooling import AdaptiveAvgPool1D # noqa: F401
from .pooling import AdaptiveAvgPool2D # noqa: F401
from .pooling import AdaptiveAvgPool3D # noqa: F401
from .pooling import AdaptiveMaxPool1D # noqa: F401
from .pooling import AdaptiveMaxPool2D # noqa: F401
from .pooling import AdaptiveMaxPool3D # noqa: F401
from .conv import Conv1D # noqa: F401
from .conv import Conv2D # noqa: F401
from .conv import Conv3D # noqa: F401
from .conv import Conv1DTranspose # noqa: F401
from .conv import Conv2DTranspose # noqa: F401
from .conv import Conv3DTranspose # noqa: F401
from .loss import BCEWithLogitsLoss # noqa: F401
from .loss import CrossEntropyLoss # noqa: F401
from .loss import MSELoss # noqa: F401
from .loss import L1Loss # noqa: F401
from .loss import NLLLoss # noqa: F401
from .loss import BCELoss # noqa: F401
from .loss import KLDivLoss # noqa: F401
from .loss import MarginRankingLoss # noqa: F401
from .loss import CTCLoss # noqa: F401
from .loss import SmoothL1Loss # noqa: F401
from .norm import BatchNorm1D # noqa: F401
from .norm import BatchNorm2D # noqa: F401
from .norm import BatchNorm3D # noqa: F401
from .norm import SyncBatchNorm # noqa: F401
from .norm import GroupNorm # noqa: F401
from .norm import LayerNorm # noqa: F401
from .norm import SpectralNorm # noqa: F401
from .norm import LocalResponseNorm # noqa: F401
from .transformer import *
from .activation import PReLU #DEFINE_ALIAS
from .activation import ReLU #DEFINE_ALIAS
from .activation import LeakyReLU #DEFINE_ALIAS
from .activation import Sigmoid #DEFINE_ALIAS
from .activation import Softmax #DEFINE_ALIAS
from .activation import LogSoftmax #DEFINE_ALIAS
from .common import Bilinear #DEFINE_ALIAS
from .common import Pad1D #DEFINE_ALIAS
from .common import Pad2D #DEFINE_ALIAS
from .common import Pad3D #DEFINE_ALIAS
from .common import CosineSimilarity #DEFINE_ALIAS
from .common import Embedding #DEFINE_ALIAS
from .common import Linear #DEFINE_ALIAS
from .common import Flatten #DEFINE_ALIAS
from .common import Upsample #DEFINE_ALIAS
from .common import Dropout #DEFINE_ALIAS
from .common import Dropout2D #DEFINE_ALIAS
from .common import Dropout3D #DEFINE_ALIAS
from .common import AlphaDropout #DEFINE_ALIAS
from .common import Upsample #DEFINE_ALIAS
from .common import UpsamplingBilinear2D #DEFINE_ALIAS
from .common import UpsamplingNearest2D #DEFINE_ALIAS
from .pooling import AvgPool1D #DEFINE_ALIAS
from .pooling import AvgPool2D #DEFINE_ALIAS
from .pooling import AvgPool3D #DEFINE_ALIAS
from .pooling import MaxPool1D #DEFINE_ALIAS
from .pooling import MaxPool2D #DEFINE_ALIAS
from .pooling import MaxPool3D #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool1D #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool2D #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool3D #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool1D #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool2D #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool3D #DEFINE_ALIAS
from .conv import Conv1D #DEFINE_ALIAS
from .conv import Conv2D #DEFINE_ALIAS
from .conv import Conv3D #DEFINE_ALIAS
from .conv import Conv1DTranspose #DEFINE_ALIAS
from .conv import Conv2DTranspose #DEFINE_ALIAS
from .conv import Conv3DTranspose #DEFINE_ALIAS
# from .conv import TreeConv #DEFINE_ALIAS
# from .conv import Conv1D #DEFINE_ALIAS
# from .loss import NCELoss #DEFINE_ALIAS
from .loss import BCEWithLogitsLoss #DEFINE_ALIAS
from .loss import CrossEntropyLoss #DEFINE_ALIAS
from .loss import MSELoss #DEFINE_ALIAS
from .loss import L1Loss #DEFINE_ALIAS
from .loss import NLLLoss #DEFINE_ALIAS
from .loss import BCELoss #DEFINE_ALIAS
from .loss import KLDivLoss #DEFINE_ALIAS
from .loss import MarginRankingLoss #DEFINE_ALIAS
from .loss import CTCLoss #DEFINE_ALIAS
from .loss import SmoothL1Loss #DEFINE_ALIAS
from .norm import BatchNorm #DEFINE_ALIAS
from .norm import SyncBatchNorm #DEFINE_ALIAS
from .norm import GroupNorm #DEFINE_ALIAS
from .norm import LayerNorm #DEFINE_ALIAS
from .norm import SpectralNorm #DEFINE_ALIAS
#from .norm import InstanceNorm #DEFINE_ALIAS
from .norm import LocalResponseNorm #DEFINE_ALIAS
# from .rnn import RNNCell #DEFINE_ALIAS
# from .rnn import GRUCell #DEFINE_ALIAS
# from .rnn import LSTMCell #DEFINE_ALIAS
from .vision import PixelShuffle #DEFINE_ALIAS
from .distance import PairwiseDistance #DEFINE_ALIAS
from .container import LayerDict #DEFINE_ALIAS
from .vision import PixelShuffle # noqa: F401
from .distance import PairwiseDistance # noqa: F401
from .container import LayerDict # noqa: F401
......@@ -14,33 +14,6 @@
# TODO: define activation functions of neural network
__all__ = [
'ELU',
'GELU',
'Hardshrink',
'Hardswish',
'Tanh',
'Hardtanh',
'PReLU',
'ReLU',
'ReLU6',
'SELU',
'LeakyReLU',
'Sigmoid',
'Silu',
'Hardsigmoid',
'Softmax',
'Softplus',
'Softshrink',
'Softsign',
'Swish',
'Tanhshrink',
'ThresholdedReLU',
'LogSigmoid',
'LogSoftmax',
'Maxout',
]
from ...fluid.dygraph import layers
from ...fluid import core
from ...fluid.framework import in_dygraph_mode
......
......@@ -14,30 +14,12 @@
# TODO: define the common classes to build a neural network
import paddle
from ...fluid.dygraph import Flatten #DEFINE_ALIAS
from ...fluid.dygraph import Flatten # noqa: F401
from ...fluid.dygraph import layers
from ...fluid.framework import in_dygraph_mode
from .. import functional as F
from ...fluid.framework import _dygraph_tracer
__all__ = [
'Embedding',
'Linear',
'Upsample',
'Pad1D',
'Pad2D',
'Pad3D',
'UpsamplingNearest2D',
'UpsamplingBilinear2D',
'CosineSimilarity',
'Dropout',
'Dropout2D',
'Dropout3D',
'Bilinear',
'AlphaDropout',
'Unfold',
]
def _npairs(x, n):
if isinstance(x, (paddle.Tensor, list)):
......
......@@ -14,15 +14,6 @@
# TODO: define classes of convolutional neural network
__all__ = [
'Conv1D',
'Conv2D',
'Conv3D',
'Conv1DTranspose',
'Conv2DTranspose',
'Conv3DTranspose',
]
import numpy as np
from ...fluid import get_flags
......
......@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['PairwiseDistance']
import numpy as np
import paddle
......
......@@ -21,20 +21,6 @@ import paddle
from .. import functional as F
from paddle.fluid.framework import core, in_dygraph_mode, _varbase_creator
__all__ = [
'BCEWithLogitsLoss',
'CrossEntropyLoss',
'HSigmoidLoss',
'MSELoss',
'L1Loss',
'NLLLoss',
'BCELoss',
'KLDivLoss',
'MarginRankingLoss',
'CTCLoss',
'SmoothL1Loss',
]
class BCEWithLogitsLoss(fluid.dygraph.Layer):
r"""
......@@ -295,7 +281,7 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
Indicate whether compute softmax before cross_entropy.
Default is ``True``.
- **name** (str, optional)
- **name** (str, optional)
The name of the operator. Default is ``None`` .
For more information, please refer to :ref:`api_guide_Name` .
......@@ -318,7 +304,7 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
- **label** (Tensor)
1. If soft_label=False, the shape is
1. If soft_label=False, the shape is
:math:`[N_1, N_2, ..., N_k]` or :math:`[N_1, N_2, ..., N_k, 1]`, k >= 1.
the data type is int32, int64, float32, float64, where each value is [0, C-1].
......
......@@ -28,13 +28,10 @@
# TODO: define normalization api
import six
#from ...fluid.dygraph.nn import InstanceNorm
from ...fluid.dygraph import BatchNorm #DEFINE_ALIAS
#from ...fluid.dygraph import GroupNorm #DEFINE_ALIAS
from ...fluid.dygraph import BatchNorm # noqa: F401
#from ...fluid.dygraph import LayerNorm #DEFINE_ALIAS
from ...fluid.dygraph import SpectralNorm #DEFINE_ALIAS
from ...fluid.dygraph import SpectralNorm # noqa: F401
from ...fluid.dygraph import layers
from ...framework import get_default_dtype, set_default_dtype
......@@ -53,12 +50,6 @@ import warnings
from ...fluid.dygraph.base import no_grad
from .. import functional as F
__all__ = [
'BatchNorm', 'GroupNorm', 'LayerNorm', 'SpectralNorm', 'BatchNorm1D',
'BatchNorm2D', 'BatchNorm3D', 'InstanceNorm1D', 'InstanceNorm2D',
'InstanceNorm3D', 'SyncBatchNorm', 'LocalResponseNorm'
]
class _InstanceNormBase(layers.Layer):
"""
......
......@@ -16,21 +16,6 @@ from ...fluid.dygraph import layers
from ...fluid.layer_helper import LayerHelper
from .. import functional as F
__all__ = [
'AvgPool1D',
'AvgPool2D',
'AvgPool3D',
'MaxPool1D',
'MaxPool2D',
'MaxPool3D',
'AdaptiveAvgPool1D',
'AdaptiveAvgPool2D',
'AdaptiveAvgPool3D',
'AdaptiveMaxPool1D',
'AdaptiveMaxPool2D',
'AdaptiveMaxPool3D',
]
class AvgPool1D(layers.Layer):
r"""
......
......@@ -33,18 +33,6 @@ from paddle.fluid.layers import utils
from paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as
from paddle.fluid.data_feeder import convert_dtype
__all__ = [
'RNNCellBase',
'SimpleRNNCell',
'LSTMCell',
'GRUCell',
'RNN',
'BiRNN',
'SimpleRNN',
'LSTM',
'GRU',
]
def split_states(states, bidirectional=False, state_components=1):
r"""
......
......@@ -13,14 +13,6 @@
# limitations under the License.
# TODO: define the classes of Transformer neural network
__all__ = [
'MultiHeadAttention',
'TransformerEncoderLayer',
'TransformerEncoder',
'TransformerDecoderLayer',
'TransformerDecoder',
'Transformer',
]
import copy
import collections
......
......@@ -17,8 +17,6 @@
from ...fluid.dygraph import layers
from .. import functional
__all__ = ['PixelShuffle']
class PixelShuffle(layers.Layer):
"""
......
......@@ -12,5 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from . import weight_norm_hook
from .weight_norm_hook import weight_norm, remove_weight_norm
from .weight_norm_hook import weight_norm, remove_weight_norm # noqa: F401
__all__ = [ #noqa
'weight_norm', 'remove_weight_norm'
]
......@@ -19,8 +19,6 @@ from ...fluid import layers as F
from ...fluid.layer_helper import LayerHelper
from ...fluid.data_feeder import check_variable_and_dtype
__all__ = ['weight_norm', 'remove_weight_norm']
def l2_norm(x, axis, epsilon=1e-12, name=None):
if len(x.shape) == 1:
......
......@@ -83,13 +83,14 @@ def deprecated(update_to="", since="", reason=""):
2. since version is empty, in this case, API is deprecated in all versions.
3. current version is newer than since version.
"""
msg = "\033[93mWarning %s \033[0m" % (msg)
warningmsg = "\033[93mWarning %s \033[0m" % (msg)
v_current = [int(i) for i in paddle.__version__.split(".")]
v_current += [0] * (4 - len(v_current))
v_since = [int(i) for i in _since.split(".")]
v_since += [0] * (4 - len(v_since))
if paddle.__version__ == "0.0.0" or _since == "" or v_current >= v_since:
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
warnings.warn(
warningmsg, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册