From 217ca776cc1d7361e15e46dfdacf5df25123208f Mon Sep 17 00:00:00 2001 From: XiaoguangHu <46782768+XiaoguangHu01@users.noreply.github.com> Date: Fri, 15 May 2020 07:29:59 +0800 Subject: [PATCH] [for 2.0-alpha] add alias in paddle.nn and paddle.tensor test=develop (#24561) * add alias in paddle.nn and paddle.tensor test=develop * add alias in paddle.nn and paddle.tensor dir test=develop * fix same conflict manually test=develop * update fc and dygraph alias test=develop * fix initalizer.py typo test=develop --- python/paddle/__init__.py | 22 ++++---- python/paddle/nn/__init__.py | 7 +-- python/paddle/nn/clip.py | 4 +- python/paddle/nn/control_flow.py | 4 +- python/paddle/nn/decode.py | 4 +- python/paddle/nn/functional/__init__.py | 40 +++++++------- python/paddle/nn/functional/common.py | 5 +- python/paddle/nn/functional/conv.py | 16 +++--- python/paddle/nn/functional/extension.py | 30 +++++++---- python/paddle/nn/functional/loss.py | 13 +++-- python/paddle/nn/functional/vision.py | 22 +++++--- python/paddle/nn/layer/activation.py | 16 +++--- python/paddle/nn/layer/conv.py | 16 +++--- python/paddle/nn/layer/extension.py | 4 +- python/paddle/nn/layer/loss.py | 20 +++---- python/paddle/tensor/__init__.py | 21 ++++---- python/paddle/tensor/creation.py | 48 +++++++++-------- python/paddle/tensor/linalg.py | 32 +++++------ python/paddle/tensor/logic.py | 12 ++--- python/paddle/tensor/manipulation.py | 69 ++++++++++-------------- python/paddle/tensor/math.py | 11 ++-- python/paddle/tensor/random.py | 16 +++--- python/paddle/tensor/search.py | 24 ++++----- python/paddle/tensor/stat.py | 8 +-- 24 files changed, 244 insertions(+), 220 deletions(-) diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index f660fb30390..83f1d0439e1 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -43,10 +43,12 @@ import paddle.metric import paddle.incubate.complex as complex # TODO: define alias in tensor and framework directory + from .tensor.random import randperm + from .tensor.attribute import rank #DEFINE_ALIAS from .tensor.attribute import shape #DEFINE_ALIAS -# from .tensor.creation import create_tensor #DEFINE_ALIAS +from .tensor.creation import create_tensor #DEFINE_ALIAS # from .tensor.creation import create_lod_tensor #DEFINE_ALIAS # from .tensor.creation import create_random_int_lodtensor #DEFINE_ALIAS from .tensor.creation import crop_tensor #DEFINE_ALIAS @@ -102,13 +104,13 @@ from .tensor.manipulation import expand #DEFINE_ALIAS from .tensor.manipulation import expand_as #DEFINE_ALIAS from .tensor.manipulation import flatten #DEFINE_ALIAS from .tensor.manipulation import gather #DEFINE_ALIAS -# from .tensor.manipulation import gather_nd #DEFINE_ALIAS +from .tensor.manipulation import gather_nd #DEFINE_ALIAS from .tensor.manipulation import reshape #DEFINE_ALIAS from .tensor.manipulation import reverse #DEFINE_ALIAS from .tensor.manipulation import scatter #DEFINE_ALIAS -# from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS -# from .tensor.manipulation import scatter_nd #DEFINE_ALIAS -# from .tensor.manipulation import shard_index #DEFINE_ALIAS +from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS +from .tensor.manipulation import scatter_nd #DEFINE_ALIAS +from .tensor.manipulation import shard_index #DEFINE_ALIAS from .tensor.manipulation import slice #DEFINE_ALIAS from .tensor.manipulation import split #DEFINE_ALIAS from .tensor.manipulation import squeeze #DEFINE_ALIAS @@ -116,11 +118,11 @@ from .tensor.manipulation import stack #DEFINE_ALIAS 
from .tensor.manipulation import strided_slice #DEFINE_ALIAS from .tensor.manipulation import transpose #DEFINE_ALIAS from .tensor.manipulation import unique #DEFINE_ALIAS -# from .tensor.manipulation import unique_with_counts #DEFINE_ALIAS +from .tensor.manipulation import unique_with_counts #DEFINE_ALIAS from .tensor.manipulation import unsqueeze #DEFINE_ALIAS from .tensor.manipulation import unstack #DEFINE_ALIAS from .tensor.manipulation import flip #DEFINE_ALIAS -# from .tensor.manipulation import unbind #DEFINE_ALIAS +from .tensor.manipulation import unbind #DEFINE_ALIAS from .tensor.manipulation import roll #DEFINE_ALIAS from .tensor.math import abs #DEFINE_ALIAS from .tensor.math import acos #DEFINE_ALIAS @@ -140,10 +142,10 @@ from .tensor.math import elementwise_pow #DEFINE_ALIAS from .tensor.math import elementwise_sub #DEFINE_ALIAS from .tensor.math import exp #DEFINE_ALIAS from .tensor.math import floor #DEFINE_ALIAS -# from .tensor.math import increment #DEFINE_ALIAS +from .tensor.math import increment #DEFINE_ALIAS from .tensor.math import log #DEFINE_ALIAS from .tensor.math import mul #DEFINE_ALIAS -# from .tensor.math import multiplex #DEFINE_ALIAS +from .tensor.math import multiplex #DEFINE_ALIAS from .tensor.math import pow #DEFINE_ALIAS from .tensor.math import reciprocal #DEFINE_ALIAS from .tensor.math import reduce_max #DEFINE_ALIAS @@ -159,7 +161,7 @@ from .tensor.math import sqrt #DEFINE_ALIAS from .tensor.math import square #DEFINE_ALIAS from .tensor.math import stanh #DEFINE_ALIAS from .tensor.math import sum #DEFINE_ALIAS -# from .tensor.math import sums #DEFINE_ALIAS +from .tensor.math import sums #DEFINE_ALIAS from .tensor.math import tanh #DEFINE_ALIAS from .tensor.math import elementwise_sum #DEFINE_ALIAS from .tensor.math import max #DEFINE_ALIAS diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 016f89a515a..607c47c9a8c 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -33,12 +33,12 @@ from .clip import GradientClipByNorm #DEFINE_ALIAS from .clip import GradientClipByValue #DEFINE_ALIAS # from .clip import set_gradient_clip #DEFINE_ALIAS from .clip import clip #DEFINE_ALIAS -# from .clip import clip_by_norm #DEFINE_ALIAS +from .clip import clip_by_norm #DEFINE_ALIAS from .control_flow import case #DEFINE_ALIAS from .control_flow import cond #DEFINE_ALIAS # from .control_flow import DynamicRNN #DEFINE_ALIAS # from .control_flow import StaticRNN #DEFINE_ALIAS -# from .control_flow import switch_case #DEFINE_ALIAS +from .control_flow import switch_case #DEFINE_ALIAS from .control_flow import while_loop #DEFINE_ALIAS # from .control_flow import rnn #DEFINE_ALIAS # from .decode import BeamSearchDecoder #DEFINE_ALIAS @@ -48,7 +48,7 @@ from .decode import beam_search_decode #DEFINE_ALIAS # from .decode import crf_decoding #DEFINE_ALIAS # from .decode import ctc_greedy_decoder #DEFINE_ALIAS # from .decode import dynamic_decode #DEFINE_ALIAS -# from .decode import gather_tree #DEFINE_ALIAS +from .decode import gather_tree #DEFINE_ALIAS from .input import data #DEFINE_ALIAS # from .input import Input #DEFINE_ALIAS # from .layer.activation import PReLU #DEFINE_ALIAS @@ -90,6 +90,7 @@ from .layer.norm import InstanceNorm #DEFINE_ALIAS # from .layer.rnn import RNNCell #DEFINE_ALIAS # from .layer.rnn import GRUCell #DEFINE_ALIAS # from .layer.rnn import LSTMCell #DEFINE_ALIAS + from .layer import loss #DEFINE_ALIAS from .layer import conv #DEFINE_ALIAS from ..fluid.dygraph.layers import Layer #DEFINE_ALIAS diff 
--git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py index 7d528ce1531..a50dad628cf 100644 --- a/python/paddle/nn/clip.py +++ b/python/paddle/nn/clip.py @@ -18,6 +18,8 @@ from ..fluid.clip import GradientClipByNorm #DEFINE_ALIAS from ..fluid.clip import GradientClipByValue #DEFINE_ALIAS from ..fluid.layers import clip #DEFINE_ALIAS +from ..fluid.layers import clip_by_norm #DEFINE_ALIAS + __all__ = [ # 'ErrorClipByValue', 'GradientClipByGlobalNorm', @@ -25,5 +27,5 @@ __all__ = [ 'GradientClipByValue', # 'set_gradient_clip', 'clip', - # 'clip_by_norm' + 'clip_by_norm' ] diff --git a/python/paddle/nn/control_flow.py b/python/paddle/nn/control_flow.py index 7526ee1abae..d3b1ec700fe 100644 --- a/python/paddle/nn/control_flow.py +++ b/python/paddle/nn/control_flow.py @@ -17,12 +17,14 @@ from ..fluid.layers import case #DEFINE_ALIAS from ..fluid.layers import cond #DEFINE_ALIAS from ..fluid.layers import while_loop #DEFINE_ALIAS +from ..fluid.layers import switch_case #DEFINE_ALIAS + __all__ = [ 'case', 'cond', # 'DynamicRNN', # 'StaticRNN', - # 'switch_case', + 'switch_case', 'while_loop', # 'rnn' ] diff --git a/python/paddle/nn/decode.py b/python/paddle/nn/decode.py index ad36021fa58..f01a5ed15b6 100644 --- a/python/paddle/nn/decode.py +++ b/python/paddle/nn/decode.py @@ -16,6 +16,8 @@ from ..fluid.layers import beam_search #DEFINE_ALIAS from ..fluid.layers import beam_search_decode #DEFINE_ALIAS +from ..fluid.layers import gather_tree #DEFINE_ALIAS + __all__ = [ # 'BeamSearchDecoder', # 'Decoder', @@ -24,5 +26,5 @@ __all__ = [ # 'crf_decoding', # 'ctc_greedy_decoder', # 'dynamic_decode', - # 'gather_tree' + 'gather_tree' ] diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index c02e1893684..3fefb1b053e 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -52,11 +52,11 @@ from .activation import thresholded_relu #DEFINE_ALIAS from .activation import log_softmax #DEFINE_ALIAS from .common import dropout #DEFINE_ALIAS # from .common import embedding #DEFINE_ALIAS -# from .common import fc #DEFINE_ALIAS +# from .common import fc #DEFINE_ALIAS from .common import label_smooth #DEFINE_ALIAS from .common import one_hot #DEFINE_ALIAS from .common import pad #DEFINE_ALIAS -# from .common import pad_constant_like #DEFINE_ALIAS +from .common import pad_constant_like #DEFINE_ALIAS from .common import pad2d #DEFINE_ALIAS from .common import unfold #DEFINE_ALIAS # from .common import bilinear_tensor_product #DEFINE_ALIAS @@ -68,19 +68,19 @@ from .conv import conv3d #DEFINE_ALIAS from .conv import conv3d_transpose #DEFINE_ALIAS from .extension import add_position_encoding #DEFINE_ALIAS # from .extension import autoincreased_step_counter #DEFINE_ALIAS -# from .extension import continuous_value_model #DEFINE_ALIAS -# from .extension import filter_by_instag #DEFINE_ALIAS +from .extension import continuous_value_model #DEFINE_ALIAS +from .extension import filter_by_instag #DEFINE_ALIAS # from .extension import linear_chain_crf #DEFINE_ALIAS # from .extension import merge_selected_rows #DEFINE_ALIAS from .extension import multiclass_nms #DEFINE_ALIAS -# from .extension import polygon_box_transform #DEFINE_ALIAS -# from .extension import random_crop #DEFINE_ALIAS +from .extension import polygon_box_transform #DEFINE_ALIAS +from .extension import random_crop #DEFINE_ALIAS from .extension import row_conv #DEFINE_ALIAS -# from .extension import rpn_target_assign #DEFINE_ALIAS -# from .extension import similarity_focus 
#DEFINE_ALIAS +from .extension import rpn_target_assign #DEFINE_ALIAS +from .extension import similarity_focus #DEFINE_ALIAS from .extension import target_assign #DEFINE_ALIAS from .extension import temporal_shift #DEFINE_ALIAS -# from .extension import warpctc #DEFINE_ALIAS +from .extension import warpctc #DEFINE_ALIAS from .extension import diag_embed #DEFINE_ALIAS from .learning_rate import cosine_decay #DEFINE_ALIAS from .learning_rate import exponential_decay #DEFINE_ALIAS @@ -123,17 +123,17 @@ from .loss import bpr_loss #DEFINE_ALIAS from .loss import center_loss #DEFINE_ALIAS from .loss import cross_entropy #DEFINE_ALIAS from .loss import dice_loss #DEFINE_ALIAS -# from .loss import edit_distance #DEFINE_ALIAS -# from .loss import huber_loss #DEFINE_ALIAS +from .loss import edit_distance #DEFINE_ALIAS +from .loss import huber_loss #DEFINE_ALIAS from .loss import iou_similarity #DEFINE_ALIAS from .loss import kldiv_loss #DEFINE_ALIAS from .loss import log_loss #DEFINE_ALIAS -# from .loss import margin_rank_loss #DEFINE_ALIAS +from .loss import margin_rank_loss #DEFINE_ALIAS from .loss import mse_loss #DEFINE_ALIAS # from .loss import nce #DEFINE_ALIAS from .loss import npair_loss #DEFINE_ALIAS from .loss import rank_loss #DEFINE_ALIAS -# from .loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS +from .loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS from .loss import sigmoid_cross_entropy_with_logits #DEFINE_ALIAS from .loss import sigmoid_focal_loss #DEFINE_ALIAS from .loss import smooth_l1 #DEFINE_ALIAS @@ -169,27 +169,27 @@ from .vision import deformable_roi_pooling #DEFINE_ALIAS from .vision import density_prior_box #DEFINE_ALIAS from .vision import detection_output #DEFINE_ALIAS from .vision import distribute_fpn_proposals #DEFINE_ALIAS -# from .vision import fsp_matrix #DEFINE_ALIAS +from .vision import fsp_matrix #DEFINE_ALIAS from .vision import generate_mask_labels #DEFINE_ALIAS from .vision import generate_proposal_labels #DEFINE_ALIAS from .vision import generate_proposals #DEFINE_ALIAS from .vision import grid_sampler #DEFINE_ALIAS from .vision import image_resize #DEFINE_ALIAS -# from .vision import image_resize_short #DEFINE_ALIAS +from .vision import image_resize_short #DEFINE_ALIAS # from .vision import multi_box_head #DEFINE_ALIAS -# from .vision import pixel_shuffle #DEFINE_ALIAS +from .vision import pixel_shuffle #DEFINE_ALIAS from .vision import prior_box #DEFINE_ALIAS from .vision import prroi_pool #DEFINE_ALIAS from .vision import psroi_pool #DEFINE_ALIAS from .vision import resize_bilinear #DEFINE_ALIAS from .vision import resize_nearest #DEFINE_ALIAS from .vision import resize_trilinear #DEFINE_ALIAS -# from .vision import retinanet_detection_output #DEFINE_ALIAS -# from .vision import retinanet_target_assign #DEFINE_ALIAS +from .vision import retinanet_detection_output #DEFINE_ALIAS +from .vision import retinanet_target_assign #DEFINE_ALIAS from .vision import roi_align #DEFINE_ALIAS -# from .vision import roi_perspective_transform #DEFINE_ALIAS +from .vision import roi_perspective_transform #DEFINE_ALIAS from .vision import roi_pool #DEFINE_ALIAS -# from .vision import shuffle_channel #DEFINE_ALIAS +from .vision import shuffle_channel #DEFINE_ALIAS from .vision import space_to_depth #DEFINE_ALIAS from .vision import yolo_box #DEFINE_ALIAS from .vision import yolov3_loss #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 9ae6e02d423..4e65f9b67c6 100644 --- 
a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -25,6 +25,9 @@ from ...fluid.layers import pad2d #DEFINE_ALIAS from ...fluid.layers import unfold #DEFINE_ALIAS from ...fluid.layers import assign #DEFINE_ALIAS +#from ...fluid.layers import fc #DEFINE_ALIAS +from ...fluid.layers import pad_constant_like #DEFINE_ALIAS + __all__ = [ 'dropout', # 'embedding', @@ -32,7 +35,7 @@ __all__ = [ 'label_smooth', 'one_hot', 'pad', - # 'pad_constant_like', + 'pad_constant_like', 'pad2d', 'unfold', # 'bilinear_tensor_product', diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index b27623d8db9..2a519718258 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -99,8 +99,8 @@ def conv2d(input, data_format="NCHW", name=None): """ - :alias_main: paddle.nn.functional.conv2d - :alias: paddle.nn.functional.conv2d,paddle.nn.functional.conv.conv2d + :alias_main: paddle.nn.functional.conv2d + :alias: paddle.nn.functional.conv2d,paddle.nn.functional.conv.conv2d The convolution2D layer calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input and @@ -336,8 +336,8 @@ def conv2d_transpose(input, data_format='NCHW', name=None): """ - :alias_main: paddle.nn.functional.conv2d_transpose - :alias: paddle.nn.functional.conv2d_transpose,paddle.nn.functional.conv.conv2d_transpose + :alias_main: paddle.nn.functional.conv2d_transpose + :alias: paddle.nn.functional.conv2d_transpose,paddle.nn.functional.conv.conv2d_transpose The convolution2D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) @@ -578,8 +578,8 @@ def conv3d(input, data_format="NCDHW", name=None): """ - :alias_main: paddle.nn.functional.conv3d - :alias: paddle.nn.functional.conv3d,paddle.nn.functional.conv.conv3d + :alias_main: paddle.nn.functional.conv3d + :alias: paddle.nn.functional.conv3d,paddle.nn.functional.conv.conv3d The convolution3D layer calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input(Input) and @@ -795,8 +795,8 @@ def conv3d_transpose(input, data_format='NCDHW', name=None): """ - :alias_main: paddle.nn.functional.conv3d_transpose - :alias: paddle.nn.functional.conv3d_transpose,paddle.nn.functional.conv.conv3d_transpose + :alias_main: paddle.nn.functional.conv3d_transpose + :alias: paddle.nn.functional.conv3d_transpose,paddle.nn.functional.conv.conv3d_transpose The convolution3D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. 
Input(Input) and output(Output) diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index 0006b021908..87210b3832f 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -18,22 +18,30 @@ from ...fluid.layers import multiclass_nms #DEFINE_ALIAS from ...fluid.layers import target_assign #DEFINE_ALIAS from ...fluid.layers import temporal_shift #DEFINE_ALIAS +from ...fluid.layers import continuous_value_model #DEFINE_ALIAS +from ...fluid.layers import filter_by_instag #DEFINE_ALIAS +from ...fluid.layers import polygon_box_transform #DEFINE_ALIAS +from ...fluid.layers import random_crop #DEFINE_ALIAS +from ...fluid.layers import rpn_target_assign #DEFINE_ALIAS +from ...fluid.layers import similarity_focus #DEFINE_ALIAS +from ...fluid.layers import warpctc #DEFINE_ALIAS + __all__ = [ 'add_position_encoding', # 'autoincreased_step_counter', - # 'continuous_value_model', - # 'filter_by_instag', + 'continuous_value_model', + 'filter_by_instag', # 'linear_chain_crf', # 'merge_selected_rows', 'multiclass_nms', - # 'polygon_box_transform', - # 'random_crop', + 'polygon_box_transform', + 'random_crop', 'row_conv', - # 'rpn_target_assign', - # 'similarity_focus', + 'rpn_target_assign', + 'similarity_focus', 'target_assign', 'temporal_shift', - # 'warpctc', + 'warpctc', 'diag_embed' ] @@ -48,8 +56,8 @@ from ...fluid.layers.layer_function_generator import templatedoc def diag_embed(input, offset=0, dim1=-2, dim2=-1): """ - :alias_main: paddle.nn.functional.diag_embed - :alias: paddle.nn.functional.diag_embed,paddle.nn.functional.extension.diag_embed + :alias_main: paddle.nn.functional.diag_embed + :alias: paddle.nn.functional.diag_embed,paddle.nn.functional.extension.diag_embed This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2) are filled by ``input``. 
By default, a 2D plane formed by the last two dimensions @@ -168,8 +176,8 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1): @templatedoc() def row_conv(input, weight, act=None): """ - :alias_main: paddle.nn.functional.row_conv - :alias: paddle.nn.functional.row_conv,paddle.nn.functional.extension.row_conv + :alias_main: paddle.nn.functional.row_conv + :alias: paddle.nn.functional.row_conv,paddle.nn.functional.extension.row_conv ${comment} diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 112cd112f2d..bc6d26370f0 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -31,22 +31,27 @@ from ...fluid.layers import square_error_cost #DEFINE_ALIAS from ...fluid.layers import ssd_loss #DEFINE_ALIAS from ...fluid.layers import teacher_student_sigmoid_loss #DEFINE_ALIAS +from ...fluid.layers import edit_distance #DEFINE_ALIAS +from ...fluid.layers import huber_loss #DEFINE_ALIAS +from ...fluid.layers import margin_rank_loss #DEFINE_ALIAS +from ...fluid.layers import sampled_softmax_with_cross_entropy #DEFINE_ALIAS + __all__ = [ 'bpr_loss', 'center_loss', 'cross_entropy', 'dice_loss', - # 'edit_distance', - # 'huber_loss', + 'edit_distance', + 'huber_loss', 'iou_similarity', 'kldiv_loss', 'log_loss', - # 'margin_rank_loss', + 'margin_rank_loss', 'mse_loss', # 'nce', 'npair_loss', 'rank_loss', - # 'sampled_softmax_with_cross_entropy', + 'sampled_softmax_with_cross_entropy', 'sigmoid_cross_entropy_with_logits', 'sigmoid_focal_loss', 'smooth_l1', diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index a1dd90acaad..a2cc8fde5ad 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -42,6 +42,14 @@ from ...fluid.layers import space_to_depth #DEFINE_ALIAS from ...fluid.layers import yolo_box #DEFINE_ALIAS from ...fluid.layers import yolov3_loss #DEFINE_ALIAS +from ...fluid.layers import fsp_matrix #DEFINE_ALIAS +from ...fluid.layers import image_resize_short #DEFINE_ALIAS +from ...fluid.layers import pixel_shuffle #DEFINE_ALIAS +from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS +from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS +from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS +from ...fluid.layers import shuffle_channel #DEFINE_ALIAS + __all__ = [ 'affine_channel', 'affine_grid', @@ -56,27 +64,27 @@ __all__ = [ 'density_prior_box', 'detection_output', 'distribute_fpn_proposals', - # 'fsp_matrix', + 'fsp_matrix', 'generate_mask_labels', 'generate_proposal_labels', 'generate_proposals', 'grid_sampler', 'image_resize', - # 'image_resize_short', + 'image_resize_short', # 'multi_box_head', - # 'pixel_shuffle', + 'pixel_shuffle', 'prior_box', 'prroi_pool', 'psroi_pool', 'resize_bilinear', 'resize_nearest', 'resize_trilinear', - # 'retinanet_detection_output', - # 'retinanet_target_assign', + 'retinanet_detection_output', + 'retinanet_target_assign', 'roi_align', - # 'roi_perspective_transform', + 'roi_perspective_transform', 'roi_pool', - # 'shuffle_channel', + 'shuffle_channel', 'space_to_depth', 'yolo_box', 'yolov3_loss' diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py index 7d39b35d79f..b30b651b79a 100644 --- a/python/paddle/nn/layer/activation.py +++ b/python/paddle/nn/layer/activation.py @@ -31,8 +31,8 @@ from .. 
import functional class HSigmoid(layers.Layer): """ - :alias_main: paddle.nn.HSigmoid - :alias: paddle.nn.HSigmoid,paddle.nn.layer.HSigmoid,paddle.nn.layer.activation.HSigmoid + :alias_main: paddle.nn.HSigmoid + :alias: paddle.nn.HSigmoid,paddle.nn.layer.HSigmoid,paddle.nn.layer.activation.HSigmoid Hierarchical Sigmoid Layer. @@ -167,8 +167,8 @@ class HSigmoid(layers.Layer): class ReLU(layers.Layer): """ - :alias_main: paddle.nn.ReLU - :alias: paddle.nn.ReLU,paddle.nn.layer.ReLU,paddle.nn.layer.activation.ReLU + :alias_main: paddle.nn.ReLU + :alias: paddle.nn.ReLU,paddle.nn.layer.ReLU,paddle.nn.layer.activation.ReLU ReLU Activation. @@ -209,8 +209,8 @@ class ReLU(layers.Layer): class Sigmoid(layers.Layer): """ - :alias_main: paddle.nn.Sigmoid - :alias: paddle.nn.Sigmoid,paddle.nn.layer.Sigmoid,paddle.nn.layer.activation.Sigmoid + :alias_main: paddle.nn.Sigmoid + :alias: paddle.nn.Sigmoid,paddle.nn.layer.Sigmoid,paddle.nn.layer.activation.Sigmoid Sigmoid Activation. @@ -254,8 +254,8 @@ class Sigmoid(layers.Layer): class LogSoftmax(layers.Layer): """ - :alias_main: paddle.nn.LogSoftmax - :alias: paddle.nn.LogSoftmax,paddle.nn.layer.LogSoftmax,paddle.nn.layer.activation.LogSoftmax + :alias_main: paddle.nn.LogSoftmax + :alias: paddle.nn.LogSoftmax,paddle.nn.layer.LogSoftmax,paddle.nn.layer.activation.LogSoftmax This operator implements the log_softmax layer. The calculation process is as follows: diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py index bbb352a1137..9fb6c9ebc2e 100644 --- a/python/paddle/nn/layer/conv.py +++ b/python/paddle/nn/layer/conv.py @@ -40,8 +40,8 @@ def _get_default_param_initializer(num_channels, filter_size): class Conv2D(layers.Layer): """ - :alias_main: paddle.nn.Conv2D - :alias: paddle.nn.Conv2D,paddle.nn.layer.Conv2D,paddle.nn.layer.conv.Conv2D + :alias_main: paddle.nn.Conv2D + :alias: paddle.nn.Conv2D,paddle.nn.layer.Conv2D,paddle.nn.layer.conv.Conv2D This interface is used to construct a callable object of the ``Conv2D`` class. For more details, refer to code examples. @@ -238,8 +238,8 @@ class Conv2D(layers.Layer): class Conv2DTranspose(layers.Layer): """ - :alias_main: paddle.nn.Conv2DTranspose - :alias: paddle.nn.Conv2DTranspose,paddle.nn.layer.Conv2DTranspose,paddle.nn.layer.conv.Conv2DTranspose + :alias_main: paddle.nn.Conv2DTranspose + :alias: paddle.nn.Conv2DTranspose,paddle.nn.layer.Conv2DTranspose,paddle.nn.layer.conv.Conv2DTranspose This interface is used to construct a callable object of the ``Conv2DTranspose`` class. For more details, refer to code examples. @@ -437,8 +437,8 @@ class Conv2DTranspose(layers.Layer): class Conv3D(layers.Layer): """ - :alias_main: paddle.nn.Conv3D - :alias: paddle.nn.Conv3D,paddle.nn.layer.Conv3D,paddle.nn.layer.conv.Conv3D + :alias_main: paddle.nn.Conv3D + :alias: paddle.nn.Conv3D,paddle.nn.layer.Conv3D,paddle.nn.layer.conv.Conv3D **Convlution3D Layer** @@ -630,8 +630,8 @@ class Conv3D(layers.Layer): class Conv3DTranspose(layers.Layer): """ - :alias_main: paddle.nn.Conv3DTranspose - :alias: paddle.nn.Conv3DTranspose,paddle.nn.layer.Conv3DTranspose,paddle.nn.layer.conv.Conv3DTranspose + :alias_main: paddle.nn.Conv3DTranspose + :alias: paddle.nn.Conv3DTranspose,paddle.nn.layer.Conv3DTranspose,paddle.nn.layer.conv.Conv3DTranspose **Convlution3D transpose layer** diff --git a/python/paddle/nn/layer/extension.py b/python/paddle/nn/layer/extension.py index 02fd7b17e9e..01ca472315f 100644 --- a/python/paddle/nn/layer/extension.py +++ b/python/paddle/nn/layer/extension.py @@ -20,8 +20,8 @@ from .. 
import functional as F class RowConv(layers.Layer): """ - :alias_main: paddle.nn.RowConv - :alias: paddle.nn.RowConv,paddle.nn.layer.RowConv,paddle.nn.layer.extension.RowConv + :alias_main: paddle.nn.RowConv + :alias: paddle.nn.RowConv,paddle.nn.layer.RowConv,paddle.nn.layer.extension.RowConv **Row-convolution operator** diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index a1212b1a239..d858d135262 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -28,8 +28,8 @@ __all__ = [ class CrossEntropyLoss(fluid.dygraph.Layer): """ - :alias_main: paddle.nn.CrossEntropyLoss - :alias: paddle.nn.CrossEntropyLoss,paddle.nn.layer.CrossEntropyLoss,paddle.nn.layer.loss.CrossEntropyLoss + :alias_main: paddle.nn.CrossEntropyLoss + :alias: paddle.nn.CrossEntropyLoss,paddle.nn.layer.CrossEntropyLoss,paddle.nn.layer.loss.CrossEntropyLoss This operator implements the cross entropy loss function. This OP combines ``LogSoftmax``, and ``NLLLoss`` together. @@ -146,8 +146,8 @@ class CrossEntropyLoss(fluid.dygraph.Layer): class MSELoss(fluid.dygraph.layers.Layer): """ - :alias_main: paddle.nn.MSELoss - :alias: paddle.nn.MSELoss,paddle.nn.layer.MSELoss,paddle.nn.layer.loss.MSELoss + :alias_main: paddle.nn.MSELoss + :alias: paddle.nn.MSELoss,paddle.nn.layer.MSELoss,paddle.nn.layer.loss.MSELoss **Mean Square Error Loss** Computes the mean square error (squared L2 norm) of given input and label. @@ -250,8 +250,8 @@ class MSELoss(fluid.dygraph.layers.Layer): class L1Loss(fluid.dygraph.Layer): """ - :alias_main: paddle.nn.L1Loss - :alias: paddle.nn.L1Loss,paddle.nn.layer.L1Loss,paddle.nn.layer.loss.L1Loss + :alias_main: paddle.nn.L1Loss + :alias: paddle.nn.L1Loss,paddle.nn.layer.L1Loss,paddle.nn.layer.loss.L1Loss This interface is used to construct a callable object of the ``L1Loss`` class. The L1Loss layer calculates the L1 Loss of input predictions and target @@ -340,8 +340,8 @@ class L1Loss(fluid.dygraph.Layer): class BCELoss(fluid.dygraph.Layer): """ - :alias_main: paddle.nn.BCELoss - :alias: paddle.nn.BCELoss,paddle.nn.layer.BCELoss,paddle.nn.layer.loss.BCELoss + :alias_main: paddle.nn.BCELoss + :alias: paddle.nn.BCELoss,paddle.nn.layer.BCELoss,paddle.nn.layer.loss.BCELoss This interface is used to construct a callable object of the ``BCELoss`` class. The BCELoss layer measures the binary_cross_entropy loss between input predictions @@ -468,8 +468,8 @@ class BCELoss(fluid.dygraph.Layer): class NLLLoss(fluid.dygraph.Layer): """ - :alias_main: paddle.nn.NLLLoss - :alias: paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss,paddle.nn.layer.loss.NLLLoss + :alias_main: paddle.nn.NLLLoss + :alias: paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss,paddle.nn.layer.loss.NLLLoss This op accepts input and target label and returns negative log likelihood cross error. It is useful to train a classification problem with C classes. 
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index b83d85fac6d..2c310115449 100644 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -22,7 +22,7 @@ from __future__ import print_function from .random import randperm from .attribute import rank #DEFINE_ALIAS from .attribute import shape #DEFINE_ALIAS -# from .creation import create_tensor #DEFINE_ALIAS +from .creation import create_tensor #DEFINE_ALIAS # from .creation import create_lod_tensor #DEFINE_ALIAS # from .creation import create_random_int_lodtensor #DEFINE_ALIAS from .creation import crop_tensor #DEFINE_ALIAS @@ -78,13 +78,13 @@ from .manipulation import expand #DEFINE_ALIAS from .manipulation import expand_as #DEFINE_ALIAS from .manipulation import flatten #DEFINE_ALIAS from .manipulation import gather #DEFINE_ALIAS -# from .manipulation import gather_nd #DEFINE_ALIAS +from .manipulation import gather_nd #DEFINE_ALIAS from .manipulation import reshape #DEFINE_ALIAS from .manipulation import reverse #DEFINE_ALIAS from .manipulation import scatter #DEFINE_ALIAS -# from .manipulation import scatter_nd_add #DEFINE_ALIAS -# from .manipulation import scatter_nd #DEFINE_ALIAS -# from .manipulation import shard_index #DEFINE_ALIAS +from .manipulation import scatter_nd_add #DEFINE_ALIAS +from .manipulation import scatter_nd #DEFINE_ALIAS +from .manipulation import shard_index #DEFINE_ALIAS from .manipulation import slice #DEFINE_ALIAS from .manipulation import split #DEFINE_ALIAS from .manipulation import squeeze #DEFINE_ALIAS @@ -92,11 +92,11 @@ from .manipulation import stack #DEFINE_ALIAS from .manipulation import strided_slice #DEFINE_ALIAS from .manipulation import transpose #DEFINE_ALIAS from .manipulation import unique #DEFINE_ALIAS -# from .manipulation import unique_with_counts #DEFINE_ALIAS +from .manipulation import unique_with_counts #DEFINE_ALIAS from .manipulation import unsqueeze #DEFINE_ALIAS from .manipulation import unstack #DEFINE_ALIAS from .manipulation import flip #DEFINE_ALIAS -# from .manipulation import unbind #DEFINE_ALIAS +from .manipulation import unbind #DEFINE_ALIAS from .manipulation import roll #DEFINE_ALIAS from .math import abs #DEFINE_ALIAS from .math import acos #DEFINE_ALIAS @@ -116,10 +116,10 @@ from .math import elementwise_pow #DEFINE_ALIAS from .math import elementwise_sub #DEFINE_ALIAS from .math import exp #DEFINE_ALIAS from .math import floor #DEFINE_ALIAS -# from .math import increment #DEFINE_ALIAS +from .math import increment #DEFINE_ALIAS from .math import log #DEFINE_ALIAS from .math import mul #DEFINE_ALIAS -# from .math import multiplex #DEFINE_ALIAS +from .math import multiplex #DEFINE_ALIAS from .math import pow #DEFINE_ALIAS from .math import reciprocal #DEFINE_ALIAS from .math import reduce_max #DEFINE_ALIAS @@ -135,7 +135,7 @@ from .math import sqrt #DEFINE_ALIAS from .math import square #DEFINE_ALIAS from .math import stanh #DEFINE_ALIAS from .math import sum #DEFINE_ALIAS -# from .math import sums #DEFINE_ALIAS +from .math import sums #DEFINE_ALIAS from .math import tanh #DEFINE_ALIAS from .math import elementwise_sum #DEFINE_ALIAS from .math import max #DEFINE_ALIAS @@ -151,7 +151,6 @@ from .math import erf #DEFINE_ALIAS from .math import addcmul #DEFINE_ALIAS from .math import addmm #DEFINE_ALIAS from .math import clamp #DEFINE_ALIAS -from .manipulation import unbind #DEFINE_ALIAS from .math import trace #DEFINE_ALIAS from .math import kron #DEFINE_ALIAS # from .random import gaussin #DEFINE_ALIAS diff --git 
a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index bbc8a8efcbc..9f3f016571f 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -28,8 +28,10 @@ from ..fluid.layers import diag #DEFINE_ALIAS from ..fluid.layers import eye #DEFINE_ALIAS from ..fluid.layers import fill_constant #DEFINE_ALIAS +from ..fluid.layers import create_tensor #DEFINE_ALIAS + __all__ = [ - # 'create_tensor', + 'create_tensor', # 'create_lod_tensor', # 'create_random_int_lodtensor', 'crop_tensor', @@ -60,8 +62,8 @@ def full_like(input, stop_gradient=True, name=None): """ - :alias_main: paddle.full_like - :alias: paddle.full_like,paddle.tensor.full_like,paddle.tensor.creation.full_like + :alias_main: paddle.full_like + :alias: paddle.full_like,paddle.tensor.full_like,paddle.tensor.creation.full_like **full_like** This function creates a tensor filled with `fill_value` which has identical shape and dtype @@ -121,8 +123,8 @@ def full_like(input, def linspace(start, stop, num, dtype, out=None, device=None, name=None): """ - :alias_main: paddle.linspace - :alias: paddle.linspace,paddle.tensor.linspace,paddle.tensor.creation.linspace + :alias_main: paddle.linspace + :alias: paddle.linspace,paddle.tensor.linspace,paddle.tensor.creation.linspace This OP return fixed number of evenly spaced values within a given interval. @@ -210,8 +212,8 @@ def linspace(start, stop, num, dtype, out=None, device=None, name=None): def ones(shape, dtype=None, out=None, device=None): """ - :alias_main: paddle.ones - :alias: paddle.ones,paddle.tensor.ones,paddle.tensor.creation.ones + :alias_main: paddle.ones + :alias: paddle.ones,paddle.tensor.ones,paddle.tensor.creation.ones The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1. @@ -252,8 +254,8 @@ def ones(shape, dtype=None, out=None, device=None): def ones_like(input, dtype=None, device=None, name=None): """ - :alias_main: paddle.ones_like - :alias: paddle.ones_like,paddle.tensor.ones_like,paddle.tensor.creation.ones_like + :alias_main: paddle.ones_like + :alias: paddle.ones_like,paddle.tensor.ones_like,paddle.tensor.creation.ones_like This function creates a ones tensor which has identical shape and dtype with `input`. @@ -322,8 +324,8 @@ def ones_like(input, dtype=None, device=None, name=None): def zeros(shape, dtype, out=None, device=None): """ - :alias_main: paddle.zeros - :alias: paddle.zeros,paddle.tensor.zeros,paddle.tensor.creation.zeros + :alias_main: paddle.zeros + :alias: paddle.zeros,paddle.tensor.zeros,paddle.tensor.creation.zeros The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0. @@ -364,8 +366,8 @@ def zeros(shape, dtype, out=None, device=None): def zeros_like(input, dtype=None, device=None, name=None): """ - :alias_main: paddle.zeros_like - :alias: paddle.zeros_like,paddle.tensor.zeros_like,paddle.tensor.creation.zeros_like + :alias_main: paddle.zeros_like + :alias: paddle.zeros_like,paddle.tensor.zeros_like,paddle.tensor.creation.zeros_like This function creates a zeros tensor which has identical shape and dtype with `input`. 
@@ -503,8 +505,8 @@ def full(shape, stop_gradient=True, name=None): """ - :alias_main: paddle.full - :alias: paddle.full,paddle.tensor.full,paddle.tensor.creation.full + :alias_main: paddle.full + :alias: paddle.full,paddle.tensor.full,paddle.tensor.creation.full This Op return a Tensor with the `fill_value` which size is same as `shape` @@ -583,8 +585,8 @@ def full(shape, def arange(start, end, step=1, dtype=None, name=None): """ - :alias_main: paddle.arange - :alias: paddle.arange,paddle.tensor.arange,paddle.tensor.creation.arange + :alias_main: paddle.arange + :alias: paddle.arange,paddle.tensor.arange,paddle.tensor.creation.arange Return evenly spaced values within a given interval. @@ -690,8 +692,8 @@ def _tril_triu_op(helper): def tril(input, diagonal=0, name=None): """ - :alias_main: paddle.tril - :alias: paddle.tril,paddle.tensor.tril,paddle.tensor.creation.tril + :alias_main: paddle.tril + :alias: paddle.tril,paddle.tensor.tril,paddle.tensor.creation.tril This op returns the lower triangular part of a matrix (2-D tensor) or batch of matrices :attr:`input`, the other elements of the result tensor are set @@ -767,8 +769,8 @@ def tril(input, diagonal=0, name=None): def triu(input, diagonal=0, name=None): """ - :alias_main: paddle.triu - :alias: paddle.triu,paddle.tensor.triu,paddle.tensor.creation.triu + :alias_main: paddle.triu + :alias: paddle.triu,paddle.tensor.triu,paddle.tensor.creation.triu This op returns the upper triangular part of a matrix (2-D tensor) or batch of matrices :attr:`input`, the other elements of the result tensor are set to 0. @@ -844,8 +846,8 @@ def triu(input, diagonal=0, name=None): def meshgrid(input, name=None): """ - :alias_main: paddle.meshgrid - :alias: paddle.meshgrid,paddle.tensor.meshgrid,paddle.tensor.creation.meshgrid + :alias_main: paddle.meshgrid + :alias: paddle.meshgrid,paddle.tensor.meshgrid,paddle.tensor.creation.meshgrid This op takes a list of N tensors as input, each of which is 1-dimensional vector, and creates N-dimensional grids. diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index fb952605d0e..85506f1b7be 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -36,8 +36,8 @@ __all__ = [ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): """ - :alias_main: paddle.matmul - :alias: paddle.matmul,paddle.tensor.matmul,paddle.tensor.linalg.matmul + :alias_main: paddle.matmul + :alias: paddle.matmul,paddle.tensor.matmul,paddle.tensor.linalg.matmul Applies matrix multiplication to two tensors. @@ -172,8 +172,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): def norm(input, p='fro', axis=None, keepdim=False, out=None, name=None): """ - :alias_main: paddle.norm - :alias: paddle.norm,paddle.tensor.norm,paddle.tensor.linalg.norm + :alias_main: paddle.norm + :alias: paddle.norm,paddle.tensor.norm,paddle.tensor.linalg.norm Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. @@ -350,8 +350,8 @@ def norm(input, p='fro', axis=None, keepdim=False, out=None, name=None): def dist(x, y, p=2): """ - :alias_main: paddle.dist - :alias: paddle.dist,paddle.tensor.dist,paddle.tensor.linalg.dist + :alias_main: paddle.dist + :alias: paddle.dist,paddle.tensor.dist,paddle.tensor.linalg.dist This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. 
The definition is as follows, for @@ -451,8 +451,8 @@ def dist(x, y, p=2): def dot(x, y, name=None): """ - :alias_main: paddle.dot - :alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot + :alias_main: paddle.dot + :alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot This operator calculates inner product for vectors. @@ -510,8 +510,8 @@ def dot(x, y, name=None): def t(input, name=None): """ - :alias_main: paddle.t - :alias: paddle.t,paddle.tensor.t,paddle.tensor.linalg.t + :alias_main: paddle.t + :alias: paddle.t,paddle.tensor.t,paddle.tensor.linalg.t Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to @@ -584,8 +584,8 @@ def t(input, name=None): def cross(input, other, dim=None): """ - :alias_main: paddle.cross - :alias: paddle.cross,paddle.tensor.cross,paddle.tensor.linalg.cross + :alias_main: paddle.cross + :alias: paddle.cross,paddle.tensor.cross,paddle.tensor.linalg.cross Returns the cross product of vectors in dimension `dim` of the `input` and `other` tensor. Inputs must have the same shape, and the size of their dim-th dimension should be equla to 3. @@ -649,8 +649,8 @@ def cross(input, other, dim=None): def cholesky(x, upper=False): """ - :alias_main: paddle.cholesky - :alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky + :alias_main: paddle.cholesky + :alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. @@ -705,8 +705,8 @@ def cholesky(x, upper=False): def bmm(x, y, name=None): """ - :alias_main: paddle.bmm - :alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm + :alias_main: paddle.bmm + :alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm Applies batched matrix multiplication to two tensors. diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 74a97d4d836..9de2622e7c6 100644 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -54,8 +54,8 @@ __all__ = [ def equal(x, y, axis=-1, name=None): """ - :alias_main: paddle.equal - :alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal + :alias_main: paddle.equal + :alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise. @@ -125,8 +125,8 @@ def equal(x, y, axis=-1, name=None): @templatedoc() def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): """ - :alias_main: paddle.allclose - :alias: paddle.allclose,paddle.tensor.allclose,paddle.tensor.logic.allclose + :alias_main: paddle.allclose + :alias: paddle.allclose,paddle.tensor.allclose,paddle.tensor.logic.allclose ${comment} @@ -210,8 +210,8 @@ def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): def elementwise_equal(x, y, name=None): """ - :alias_main: paddle.elementwise_equal - :alias: paddle.elementwise_equal,paddle.tensor.elementwise_equal,paddle.tensor.logic.elementwise_equal + :alias_main: paddle.elementwise_equal + :alias: paddle.elementwise_equal,paddle.tensor.elementwise_equal,paddle.tensor.logic.elementwise_equal This layer returns the truth value of :math:`x == y` elementwise. 
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 985f980cbf8..a888b78ea91 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -36,40 +36,25 @@ from ..fluid.layers import transpose #DEFINE_ALIAS from ..fluid.layers import unique #DEFINE_ALIAS from ..fluid.layers import unstack #DEFINE_ALIAS +from ..fluid.layers import gather_nd #DEFINE_ALIAS +from ..fluid.layers import scatter_nd_add #DEFINE_ALIAS +from ..fluid.layers import scatter_nd #DEFINE_ALIAS +from ..fluid.layers import shard_index #DEFINE_ALIAS +from ..fluid.layers import unique_with_counts #DEFINE_ALIAS + __all__ = [ - 'cast', - 'concat', - 'expand', - 'expand_as', - 'flatten', - 'gather', - # 'gather_nd', - 'reshape', - 'reverse', - 'scatter', - # 'scatter_nd_add', - # 'scatter_nd', - # 'shard_index', - 'slice', - 'split', - 'squeeze', - 'stack', - 'strided_slice', - 'transpose', - 'unique', - # 'unique_with_counts', - 'unsqueeze', - 'unstack', - 'flip', - 'unbind', - 'roll' + 'cast', 'concat', 'expand', 'expand_as', 'flatten', 'gather', 'gather_nd', + 'reshape', 'reverse', 'scatter', 'scatter_nd_add', 'scatter_nd', + 'shard_index', 'slice', 'split', 'squeeze', 'stack', 'strided_slice', + 'transpose', 'unique', 'unique_with_counts', 'unsqueeze', 'unstack', 'flip', + 'unbind', 'roll' ] def flip(input, dims, name=None): """ - :alias_main: paddle.flip - :alias: paddle.flip,paddle.tensor.flip,paddle.tensor.manipulation.flip + :alias_main: paddle.flip + :alias: paddle.flip,paddle.tensor.flip,paddle.tensor.manipulation.flip Reverse the order of a n-D tensor along given axis in dims. @@ -121,8 +106,8 @@ def flip(input, dims, name=None): def roll(input, shifts, dims=None): """ - :alias_main: paddle.roll - :alias: paddle.roll,paddle.tensor.roll,paddle.tensor.manipulation.roll + :alias_main: paddle.roll + :alias: paddle.roll,paddle.tensor.roll,paddle.tensor.manipulation.roll Roll the `input` tensor along the given dimension(s). Elements that are shifted beyond the last position are re-introduced at the first position. If a dimension is not specified, @@ -195,8 +180,8 @@ def roll(input, shifts, dims=None): def stack(x, axis=0, out=None, name=None): """ - :alias_main: paddle.stack - :alias: paddle.stack,paddle.tensor.stack,paddle.tensor.manipulation.stack + :alias_main: paddle.stack + :alias: paddle.stack,paddle.tensor.stack,paddle.tensor.manipulation.stack This OP stacks all the inputs :code:`x` along axis. @@ -308,8 +293,8 @@ def stack(x, axis=0, out=None, name=None): def split(input, num_or_sections, dim=-1, name=None): """ - :alias_main: paddle.split - :alias: paddle.split,paddle.tensor.split,paddle.tensor.manipulation.split + :alias_main: paddle.split + :alias: paddle.split,paddle.tensor.split,paddle.tensor.manipulation.split Split the input tensor into multiple sub-Tensors. Args: @@ -447,8 +432,8 @@ def split(input, num_or_sections, dim=-1, name=None): def squeeze(input, axes, out=None, name=None): """ - :alias_main: paddle.squeeze - :alias: paddle.squeeze,paddle.tensor.squeeze,paddle.tensor.manipulation.squeeze + :alias_main: paddle.squeeze + :alias: paddle.squeeze,paddle.tensor.squeeze,paddle.tensor.manipulation.squeeze This OP will squeeze single-dimensional entries of input tensor's shape. If axes is provided, will remove the dims by axes, the dims selected by axes should be one. 
If not provide axes, all dims equal @@ -526,8 +511,8 @@ def squeeze(input, axes, out=None, name=None): def unsqueeze(input, axes, out=None, name=None): """ - :alias_main: paddle.unsqueeze - :alias: paddle.unsqueeze,paddle.tensor.unsqueeze,paddle.tensor.manipulation.unsqueeze + :alias_main: paddle.unsqueeze + :alias: paddle.unsqueeze,paddle.tensor.unsqueeze,paddle.tensor.manipulation.unsqueeze Insert single-dimensional entries to the shape of a Tensor. Takes one required argument axes, a list of dimensions that will be inserted. @@ -609,8 +594,8 @@ def unsqueeze(input, axes, out=None, name=None): def gather(input, index, overwrite=True): """ - :alias_main: paddle.gather - :alias: paddle.gather,paddle.tensor.gather,paddle.tensor.manipulation.gather + :alias_main: paddle.gather + :alias: paddle.gather,paddle.tensor.gather,paddle.tensor.manipulation.gather **Gather Layer** @@ -683,8 +668,8 @@ def gather(input, index, overwrite=True): def unbind(input, axis=0): """ - :alias_main: paddle.tensor.unbind - :alias: paddle.tensor.unbind,paddle.tensor.manipulation.unbind + :alias_main: paddle.tensor.unbind + :alias: paddle.tensor.unbind,paddle.tensor.manipulation.unbind Removes a tensor dimension, then split the input tensor into multiple sub-Tensors. Args: diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 241a1c581d2..9144a29ac98 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -58,6 +58,10 @@ from ..fluid.layers import stanh #DEFINE_ALIAS from ..fluid.layers import atan #DEFINE_ALIAS from ..fluid.layers import erf #DEFINE_ALIAS +from ..fluid.layers import increment #DEFINE_ALIAS +from ..fluid.layers import multiplex #DEFINE_ALIAS +from ..fluid.layers import sums #DEFINE_ALIAS + __all__ = [ 'abs', 'acos', @@ -77,10 +81,10 @@ __all__ = [ 'elementwise_sub', 'exp', 'floor', -# 'increment', + 'increment', 'log', 'mul', -# 'multiplex', + 'multiplex', 'pow', 'reciprocal', 'reduce_max', @@ -96,7 +100,7 @@ __all__ = [ 'square', 'stanh', 'sum', -# 'sums', + 'sums', 'tanh', 'elementwise_sum', 'max', @@ -116,6 +120,7 @@ __all__ = [ 'kron' ] + # yapf: enable. diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index 02709e568a6..feb2f6afd00 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -47,8 +47,8 @@ def randint(low, seed=0, name=None): """ - :alias_main: paddle.randint - :alias: paddle.randint,paddle.tensor.randint,paddle.tensor.random.randint + :alias_main: paddle.randint + :alias: paddle.randint,paddle.tensor.randint,paddle.tensor.random.randint This function returns a Tensor filled with random integers from the "discrete uniform" distribution of the specified data type in the interval [low, high). If high is None (the default), then results are from [0, low). 
@@ -213,8 +213,8 @@ def randn(shape, stop_gradient=True, name=None): """ - :alias_main: paddle.randn - :alias: paddle.randn,paddle.tensor.randn,paddle.tensor.random.randn + :alias_main: paddle.randn + :alias: paddle.randn,paddle.tensor.randn,paddle.tensor.random.randn This function returns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal @@ -324,8 +324,8 @@ def randperm(n, stop_gradient=True, seed=0): """ - :alias_main: paddle.randperm - :alias: paddle.randperm,paddle.tensor.randperm,paddle.tensor.random.randperm + :alias_main: paddle.randperm + :alias: paddle.randperm,paddle.tensor.randperm,paddle.tensor.random.randperm ${comment} @@ -408,8 +408,8 @@ def randperm(n, def rand(shape, out=None, dtype=None, device=None, stop_gradient=True): """ - :alias_main: paddle.rand - :alias: paddle.rand,paddle.tensor.rand,paddle.tensor.random.rand + :alias_main: paddle.rand + :alias: paddle.rand,paddle.tensor.rand,paddle.tensor.random.rand This OP initializes a variable with random values sampled from a uniform distribution in the range [0, 1). diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index d08e784e89b..59b8f1e765b 100644 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -44,8 +44,8 @@ from paddle.common_ops_import import * def argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None): """ - :alias_main: paddle.argmax - :alias: paddle.argmax,paddle.tensor.argmax,paddle.tensor.search.argmax + :alias_main: paddle.argmax + :alias: paddle.argmax,paddle.tensor.argmax,paddle.tensor.search.argmax This OP computes the indices of the max elements of the input tensor's element along the provided axis. @@ -137,8 +137,8 @@ def argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None): def index_select(input, index, dim=0): """ - :alias_main: paddle.index_select - :alias: paddle.index_select,paddle.tensor.index_select,paddle.tensor.search.index_select + :alias_main: paddle.index_select + :alias: paddle.index_select,paddle.tensor.index_select,paddle.tensor.search.index_select Returns a new tensor which indexes the `input` tensor along dimension `dim` using the entries in `index` which is a Tensor. The returned tensor has the same number @@ -201,8 +201,8 @@ def index_select(input, index, dim=0): def nonzero(input, as_tuple=False): """ - :alias_main: paddle.nonzero - :alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero + :alias_main: paddle.nonzero + :alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero Return a tensor containing the indices of all non-zero elements of the `input` tensor. 
If as_tuple is True, return a tuple of 1-D tensors, one for each dimension @@ -288,8 +288,8 @@ def nonzero(input, as_tuple=False): def sort(input, axis=-1, descending=False, out=None, name=None): """ - :alias_main: paddle.sort - :alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort + :alias_main: paddle.sort + :alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort This OP sorts the input along the given axis, and returns sorted output data Varibale and its corresponding index Variable with the same shape as @@ -380,8 +380,8 @@ def sort(input, axis=-1, descending=False, out=None, name=None): def where(condition, x, y, name=None): """ - :alias_main: paddle.where - :alias: paddle.where,paddle.tensor.where,paddle.tensor.search.where + :alias_main: paddle.where + :alias: paddle.where,paddle.tensor.where,paddle.tensor.search.where Return a tensor of elements selected from either $x$ or $y$, depending on $condition$. @@ -458,8 +458,8 @@ def where(condition, x, y, name=None): def index_sample(x, index): """ - :alias_main: paddle.index_sample - :alias: paddle.index_sample,paddle.tensor.index_sample,paddle.tensor.search.index_sample + :alias_main: paddle.index_sample + :alias: paddle.index_sample,paddle.tensor.index_sample,paddle.tensor.search.index_sample **IndexSample Layer** diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index 3dc6bbad014..9b3bb081d97 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -28,8 +28,8 @@ from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_t def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None): """ - :alias_main: paddle.var - :alias: paddle.var,paddle.tensor.var,paddle.tensor.stat.var + :alias_main: paddle.var + :alias: paddle.var,paddle.tensor.var,paddle.tensor.stat.var Computes the variance of the input Variable's elements along the specified axis. @@ -107,8 +107,8 @@ def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None): def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None): """ - :alias_main: paddle.std - :alias: paddle.std,paddle.tensor.std,paddle.tensor.stat.std + :alias_main: paddle.std + :alias: paddle.std,paddle.tensor.std,paddle.tensor.stat.std Computes the standard-deviation of the input Variable's elements along the specified axis. -- GitLab
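
Note on verifying the aliases (not part of the patch): the re-exports above only re-bind existing `paddle.fluid.layers` callables under the new `paddle.*`, `paddle.nn.*`, and `paddle.nn.functional.*` names, as the `#DEFINE_ALIAS` markers indicate. Below is a minimal sketch, assuming a 2.0-alpha build with this change applied, that checks a few of the newly exposed names resolve to the same underlying objects; the names are taken directly from the imports added in the diff.

    # Minimal sanity check of the alias wiring added in this patch.
    # Assumes paddle 2.0-alpha with this change applied; each assertion pairs a
    # newly exposed public name with the fluid.layers function it re-exports.
    import paddle
    import paddle.nn
    import paddle.nn.functional as F
    import paddle.fluid as fluid

    # paddle/__init__.py -> paddle/tensor/manipulation.py -> fluid.layers
    assert paddle.gather_nd is fluid.layers.gather_nd
    assert paddle.scatter_nd_add is fluid.layers.scatter_nd_add

    # paddle/nn/__init__.py -> paddle/nn/clip.py and decode.py -> fluid.layers
    assert paddle.nn.clip_by_norm is fluid.layers.clip_by_norm
    assert paddle.nn.gather_tree is fluid.layers.gather_tree

    # paddle/nn/functional/__init__.py -> extension.py and loss.py -> fluid.layers
    assert F.warpctc is fluid.layers.warpctc
    assert F.huber_loss is fluid.layers.huber_loss

    print("new aliases resolve to the fluid.layers implementations")

Because these are plain from-imports, no behavior changes: the new names and the old `fluid.layers` paths are bound to the same function objects, so existing code keeps working while the flat 2.0-style namespace becomes available.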