Unverified commit 217ca776, authored by XiaoguangHu, committed by GitHub

[for 2.0-alpha] add alias in paddle.nn and paddle.tensor test=develop (#24561)

* add alias in paddle.nn and paddle.tensor test=develop

* add alias in paddle.nn and paddle.tensor dir test=develop

* fix same conflict manually test=develop

* update fc and dygraph alias test=develop

* fix initalizer.py typo test=develop
Parent: 2b6d0049
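The diff below wires a set of existing paddle.fluid implementations into the new paddle, paddle.tensor, and paddle.nn namespaces through #DEFINE_ALIAS re-imports. As a minimal, hedged usage sketch (not part of the commit; it assumes a Paddle build that already contains this change), the newly exposed names become reachable like this:

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

# Each name below is a plain re-import of the corresponding fluid implementation,
# exposed through the 2.0-alpha alias layer enabled in this diff.
print(paddle.gather_nd)    # via paddle.tensor.manipulation
print(paddle.increment)    # via paddle.tensor.math
print(nn.clip_by_norm)     # via paddle.nn.clip
print(F.warpctc)           # via paddle.nn.functional.extension
print(F.pixel_shuffle)     # via paddle.nn.functional.vision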
@@ -43,10 +43,12 @@ import paddle.metric
 import paddle.incubate.complex as complex
 # TODO: define alias in tensor and framework directory
 from .tensor.random import randperm
 from .tensor.attribute import rank #DEFINE_ALIAS
 from .tensor.attribute import shape #DEFINE_ALIAS
-# from .tensor.creation import create_tensor #DEFINE_ALIAS
+from .tensor.creation import create_tensor #DEFINE_ALIAS
 # from .tensor.creation import create_lod_tensor #DEFINE_ALIAS
 # from .tensor.creation import create_random_int_lodtensor #DEFINE_ALIAS
 from .tensor.creation import crop_tensor #DEFINE_ALIAS
@@ -102,13 +104,13 @@ from .tensor.manipulation import expand #DEFINE_ALIAS
 from .tensor.manipulation import expand_as #DEFINE_ALIAS
 from .tensor.manipulation import flatten #DEFINE_ALIAS
 from .tensor.manipulation import gather #DEFINE_ALIAS
-# from .tensor.manipulation import gather_nd #DEFINE_ALIAS
+from .tensor.manipulation import gather_nd #DEFINE_ALIAS
 from .tensor.manipulation import reshape #DEFINE_ALIAS
 from .tensor.manipulation import reverse #DEFINE_ALIAS
 from .tensor.manipulation import scatter #DEFINE_ALIAS
-# from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS
+from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS
-# from .tensor.manipulation import scatter_nd #DEFINE_ALIAS
+from .tensor.manipulation import scatter_nd #DEFINE_ALIAS
-# from .tensor.manipulation import shard_index #DEFINE_ALIAS
+from .tensor.manipulation import shard_index #DEFINE_ALIAS
 from .tensor.manipulation import slice #DEFINE_ALIAS
 from .tensor.manipulation import split #DEFINE_ALIAS
 from .tensor.manipulation import squeeze #DEFINE_ALIAS
@@ -116,11 +118,11 @@ from .tensor.manipulation import stack #DEFINE_ALIAS
 from .tensor.manipulation import strided_slice #DEFINE_ALIAS
 from .tensor.manipulation import transpose #DEFINE_ALIAS
 from .tensor.manipulation import unique #DEFINE_ALIAS
-# from .tensor.manipulation import unique_with_counts #DEFINE_ALIAS
+from .tensor.manipulation import unique_with_counts #DEFINE_ALIAS
 from .tensor.manipulation import unsqueeze #DEFINE_ALIAS
 from .tensor.manipulation import unstack #DEFINE_ALIAS
 from .tensor.manipulation import flip #DEFINE_ALIAS
-# from .tensor.manipulation import unbind #DEFINE_ALIAS
+from .tensor.manipulation import unbind #DEFINE_ALIAS
 from .tensor.manipulation import roll #DEFINE_ALIAS
 from .tensor.math import abs #DEFINE_ALIAS
 from .tensor.math import acos #DEFINE_ALIAS
@@ -140,10 +142,10 @@ from .tensor.math import elementwise_pow #DEFINE_ALIAS
 from .tensor.math import elementwise_sub #DEFINE_ALIAS
 from .tensor.math import exp #DEFINE_ALIAS
 from .tensor.math import floor #DEFINE_ALIAS
-# from .tensor.math import increment #DEFINE_ALIAS
+from .tensor.math import increment #DEFINE_ALIAS
 from .tensor.math import log #DEFINE_ALIAS
 from .tensor.math import mul #DEFINE_ALIAS
-# from .tensor.math import multiplex #DEFINE_ALIAS
+from .tensor.math import multiplex #DEFINE_ALIAS
 from .tensor.math import pow #DEFINE_ALIAS
 from .tensor.math import reciprocal #DEFINE_ALIAS
 from .tensor.math import reduce_max #DEFINE_ALIAS
@@ -159,7 +161,7 @@ from .tensor.math import sqrt #DEFINE_ALIAS
 from .tensor.math import square #DEFINE_ALIAS
 from .tensor.math import stanh #DEFINE_ALIAS
 from .tensor.math import sum #DEFINE_ALIAS
-# from .tensor.math import sums #DEFINE_ALIAS
+from .tensor.math import sums #DEFINE_ALIAS
 from .tensor.math import tanh #DEFINE_ALIAS
 from .tensor.math import elementwise_sum #DEFINE_ALIAS
 from .tensor.math import max #DEFINE_ALIAS
...
@@ -33,12 +33,12 @@ from .clip import GradientClipByNorm #DEFINE_ALIAS
 from .clip import GradientClipByValue #DEFINE_ALIAS
 # from .clip import set_gradient_clip #DEFINE_ALIAS
 from .clip import clip #DEFINE_ALIAS
-# from .clip import clip_by_norm #DEFINE_ALIAS
+from .clip import clip_by_norm #DEFINE_ALIAS
 from .control_flow import case #DEFINE_ALIAS
 from .control_flow import cond #DEFINE_ALIAS
 # from .control_flow import DynamicRNN #DEFINE_ALIAS
 # from .control_flow import StaticRNN #DEFINE_ALIAS
-# from .control_flow import switch_case #DEFINE_ALIAS
+from .control_flow import switch_case #DEFINE_ALIAS
 from .control_flow import while_loop #DEFINE_ALIAS
 # from .control_flow import rnn #DEFINE_ALIAS
 # from .decode import BeamSearchDecoder #DEFINE_ALIAS
@@ -48,7 +48,7 @@ from .decode import beam_search_decode #DEFINE_ALIAS
 # from .decode import crf_decoding #DEFINE_ALIAS
 # from .decode import ctc_greedy_decoder #DEFINE_ALIAS
 # from .decode import dynamic_decode #DEFINE_ALIAS
-# from .decode import gather_tree #DEFINE_ALIAS
+from .decode import gather_tree #DEFINE_ALIAS
 from .input import data #DEFINE_ALIAS
 # from .input import Input #DEFINE_ALIAS
 # from .layer.activation import PReLU #DEFINE_ALIAS
@@ -90,6 +90,7 @@ from .layer.norm import InstanceNorm #DEFINE_ALIAS
 # from .layer.rnn import RNNCell #DEFINE_ALIAS
 # from .layer.rnn import GRUCell #DEFINE_ALIAS
 # from .layer.rnn import LSTMCell #DEFINE_ALIAS
 from .layer import loss #DEFINE_ALIAS
 from .layer import conv #DEFINE_ALIAS
 from ..fluid.dygraph.layers import Layer #DEFINE_ALIAS
...
@@ -18,6 +18,8 @@ from ..fluid.clip import GradientClipByNorm #DEFINE_ALIAS
 from ..fluid.clip import GradientClipByValue #DEFINE_ALIAS
 from ..fluid.layers import clip #DEFINE_ALIAS
+from ..fluid.layers import clip_by_norm #DEFINE_ALIAS
 __all__ = [
 # 'ErrorClipByValue',
 'GradientClipByGlobalNorm',
@@ -25,5 +27,5 @@ __all__ = [
 'GradientClipByValue',
 # 'set_gradient_clip',
 'clip',
-# 'clip_by_norm'
+'clip_by_norm'
 ]
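The hunk above shows the pattern repeated throughout this commit: re-import the fluid implementation into the new module and uncomment its entry in __all__. As a hedged sanity check (again assuming a build that contains this change), the alias is the very same function object as its fluid original, not a wrapper:

import paddle.fluid as fluid
import paddle.nn as nn

# A DEFINE_ALIAS re-import binds the identical function object under the new name.
assert nn.clip_by_norm is fluid.layers.clip_by_norm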
@@ -17,12 +17,14 @@ from ..fluid.layers import case #DEFINE_ALIAS
 from ..fluid.layers import cond #DEFINE_ALIAS
 from ..fluid.layers import while_loop #DEFINE_ALIAS
+from ..fluid.layers import switch_case #DEFINE_ALIAS
 __all__ = [
 'case',
 'cond',
 # 'DynamicRNN',
 # 'StaticRNN',
-# 'switch_case',
+'switch_case',
 'while_loop',
 # 'rnn'
 ]
@@ -16,6 +16,8 @@
 from ..fluid.layers import beam_search #DEFINE_ALIAS
 from ..fluid.layers import beam_search_decode #DEFINE_ALIAS
+from ..fluid.layers import gather_tree #DEFINE_ALIAS
 __all__ = [
 # 'BeamSearchDecoder',
 # 'Decoder',
@@ -24,5 +26,5 @@ __all__ = [
 # 'crf_decoding',
 # 'ctc_greedy_decoder',
 # 'dynamic_decode',
-# 'gather_tree'
+'gather_tree'
 ]
@@ -52,11 +52,11 @@ from .activation import thresholded_relu #DEFINE_ALIAS
 from .activation import log_softmax #DEFINE_ALIAS
 from .common import dropout #DEFINE_ALIAS
 # from .common import embedding #DEFINE_ALIAS
 # from .common import fc #DEFINE_ALIAS
 from .common import label_smooth #DEFINE_ALIAS
 from .common import one_hot #DEFINE_ALIAS
 from .common import pad #DEFINE_ALIAS
-# from .common import pad_constant_like #DEFINE_ALIAS
+from .common import pad_constant_like #DEFINE_ALIAS
 from .common import pad2d #DEFINE_ALIAS
 from .common import unfold #DEFINE_ALIAS
 # from .common import bilinear_tensor_product #DEFINE_ALIAS
@@ -68,19 +68,19 @@ from .conv import conv3d #DEFINE_ALIAS
 from .conv import conv3d_transpose #DEFINE_ALIAS
 from .extension import add_position_encoding #DEFINE_ALIAS
 # from .extension import autoincreased_step_counter #DEFINE_ALIAS
-# from .extension import continuous_value_model #DEFINE_ALIAS
+from .extension import continuous_value_model #DEFINE_ALIAS
-# from .extension import filter_by_instag #DEFINE_ALIAS
+from .extension import filter_by_instag #DEFINE_ALIAS
 # from .extension import linear_chain_crf #DEFINE_ALIAS
 # from .extension import merge_selected_rows #DEFINE_ALIAS
 from .extension import multiclass_nms #DEFINE_ALIAS
-# from .extension import polygon_box_transform #DEFINE_ALIAS
+from .extension import polygon_box_transform #DEFINE_ALIAS
-# from .extension import random_crop #DEFINE_ALIAS
+from .extension import random_crop #DEFINE_ALIAS
 from .extension import row_conv #DEFINE_ALIAS
-# from .extension import rpn_target_assign #DEFINE_ALIAS
+from .extension import rpn_target_assign #DEFINE_ALIAS
-# from .extension import similarity_focus #DEFINE_ALIAS
+from .extension import similarity_focus #DEFINE_ALIAS
 from .extension import target_assign #DEFINE_ALIAS
 from .extension import temporal_shift #DEFINE_ALIAS
-# from .extension import warpctc #DEFINE_ALIAS
+from .extension import warpctc #DEFINE_ALIAS
 from .extension import diag_embed #DEFINE_ALIAS
 from .learning_rate import cosine_decay #DEFINE_ALIAS
 from .learning_rate import exponential_decay #DEFINE_ALIAS
@@ -123,17 +123,17 @@ from .loss import bpr_loss #DEFINE_ALIAS
 from .loss import center_loss #DEFINE_ALIAS
 from .loss import cross_entropy #DEFINE_ALIAS
 from .loss import dice_loss #DEFINE_ALIAS
-# from .loss import edit_distance #DEFINE_ALIAS
+from .loss import edit_distance #DEFINE_ALIAS
-# from .loss import huber_loss #DEFINE_ALIAS
+from .loss import huber_loss #DEFINE_ALIAS
 from .loss import iou_similarity #DEFINE_ALIAS
 from .loss import kldiv_loss #DEFINE_ALIAS
 from .loss import log_loss #DEFINE_ALIAS
-# from .loss import margin_rank_loss #DEFINE_ALIAS
+from .loss import margin_rank_loss #DEFINE_ALIAS
 from .loss import mse_loss #DEFINE_ALIAS
 # from .loss import nce #DEFINE_ALIAS
 from .loss import npair_loss #DEFINE_ALIAS
 from .loss import rank_loss #DEFINE_ALIAS
-# from .loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
+from .loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
 from .loss import sigmoid_cross_entropy_with_logits #DEFINE_ALIAS
 from .loss import sigmoid_focal_loss #DEFINE_ALIAS
 from .loss import smooth_l1 #DEFINE_ALIAS
@@ -169,27 +169,27 @@ from .vision import deformable_roi_pooling #DEFINE_ALIAS
 from .vision import density_prior_box #DEFINE_ALIAS
 from .vision import detection_output #DEFINE_ALIAS
 from .vision import distribute_fpn_proposals #DEFINE_ALIAS
-# from .vision import fsp_matrix #DEFINE_ALIAS
+from .vision import fsp_matrix #DEFINE_ALIAS
 from .vision import generate_mask_labels #DEFINE_ALIAS
 from .vision import generate_proposal_labels #DEFINE_ALIAS
 from .vision import generate_proposals #DEFINE_ALIAS
 from .vision import grid_sampler #DEFINE_ALIAS
 from .vision import image_resize #DEFINE_ALIAS
-# from .vision import image_resize_short #DEFINE_ALIAS
+from .vision import image_resize_short #DEFINE_ALIAS
 # from .vision import multi_box_head #DEFINE_ALIAS
-# from .vision import pixel_shuffle #DEFINE_ALIAS
+from .vision import pixel_shuffle #DEFINE_ALIAS
 from .vision import prior_box #DEFINE_ALIAS
 from .vision import prroi_pool #DEFINE_ALIAS
 from .vision import psroi_pool #DEFINE_ALIAS
 from .vision import resize_bilinear #DEFINE_ALIAS
 from .vision import resize_nearest #DEFINE_ALIAS
 from .vision import resize_trilinear #DEFINE_ALIAS
-# from .vision import retinanet_detection_output #DEFINE_ALIAS
+from .vision import retinanet_detection_output #DEFINE_ALIAS
-# from .vision import retinanet_target_assign #DEFINE_ALIAS
+from .vision import retinanet_target_assign #DEFINE_ALIAS
 from .vision import roi_align #DEFINE_ALIAS
-# from .vision import roi_perspective_transform #DEFINE_ALIAS
+from .vision import roi_perspective_transform #DEFINE_ALIAS
 from .vision import roi_pool #DEFINE_ALIAS
-# from .vision import shuffle_channel #DEFINE_ALIAS
+from .vision import shuffle_channel #DEFINE_ALIAS
 from .vision import space_to_depth #DEFINE_ALIAS
 from .vision import yolo_box #DEFINE_ALIAS
 from .vision import yolov3_loss #DEFINE_ALIAS
@@ -25,6 +25,9 @@ from ...fluid.layers import pad2d #DEFINE_ALIAS
 from ...fluid.layers import unfold #DEFINE_ALIAS
 from ...fluid.layers import assign #DEFINE_ALIAS
+#from ...fluid.layers import fc #DEFINE_ALIAS
+from ...fluid.layers import pad_constant_like #DEFINE_ALIAS
 __all__ = [
 'dropout',
 # 'embedding',
@@ -32,7 +35,7 @@ __all__ = [
 'label_smooth',
 'one_hot',
 'pad',
-# 'pad_constant_like',
+'pad_constant_like',
 'pad2d',
 'unfold',
 # 'bilinear_tensor_product',
...
@@ -99,8 +99,8 @@ def conv2d(input,
 data_format="NCHW",
 name=None):
 """
 :alias_main: paddle.nn.functional.conv2d
 :alias: paddle.nn.functional.conv2d,paddle.nn.functional.conv.conv2d
 The convolution2D layer calculates the output based on the input, filter
 and strides, paddings, dilations, groups parameters. Input and
@@ -336,8 +336,8 @@ def conv2d_transpose(input,
 data_format='NCHW',
 name=None):
 """
 :alias_main: paddle.nn.functional.conv2d_transpose
 :alias: paddle.nn.functional.conv2d_transpose,paddle.nn.functional.conv.conv2d_transpose
 The convolution2D transpose layer calculates the output based on the input,
 filter, and dilations, strides, paddings. Input(Input) and output(Output)
@@ -578,8 +578,8 @@ def conv3d(input,
 data_format="NCDHW",
 name=None):
 """
 :alias_main: paddle.nn.functional.conv3d
 :alias: paddle.nn.functional.conv3d,paddle.nn.functional.conv.conv3d
 The convolution3D layer calculates the output based on the input, filter
 and strides, paddings, dilations, groups parameters. Input(Input) and
@@ -795,8 +795,8 @@ def conv3d_transpose(input,
 data_format='NCDHW',
 name=None):
 """
 :alias_main: paddle.nn.functional.conv3d_transpose
 :alias: paddle.nn.functional.conv3d_transpose,paddle.nn.functional.conv.conv3d_transpose
 The convolution3D transpose layer calculates the output based on the input,
 filter, and dilations, strides, paddings. Input(Input) and output(Output)
...
@@ -18,22 +18,30 @@ from ...fluid.layers import multiclass_nms #DEFINE_ALIAS
 from ...fluid.layers import target_assign #DEFINE_ALIAS
 from ...fluid.layers import temporal_shift #DEFINE_ALIAS
+from ...fluid.layers import continuous_value_model #DEFINE_ALIAS
+from ...fluid.layers import filter_by_instag #DEFINE_ALIAS
+from ...fluid.layers import polygon_box_transform #DEFINE_ALIAS
+from ...fluid.layers import random_crop #DEFINE_ALIAS
+from ...fluid.layers import rpn_target_assign #DEFINE_ALIAS
+from ...fluid.layers import similarity_focus #DEFINE_ALIAS
+from ...fluid.layers import warpctc #DEFINE_ALIAS
 __all__ = [
 'add_position_encoding',
 # 'autoincreased_step_counter',
-# 'continuous_value_model',
+'continuous_value_model',
-# 'filter_by_instag',
+'filter_by_instag',
 # 'linear_chain_crf',
 # 'merge_selected_rows',
 'multiclass_nms',
-# 'polygon_box_transform',
+'polygon_box_transform',
-# 'random_crop',
+'random_crop',
 'row_conv',
-# 'rpn_target_assign',
+'rpn_target_assign',
-# 'similarity_focus',
+'similarity_focus',
 'target_assign',
 'temporal_shift',
-# 'warpctc',
+'warpctc',
 'diag_embed'
 ]
@@ -48,8 +56,8 @@ from ...fluid.layers.layer_function_generator import templatedoc
 def diag_embed(input, offset=0, dim1=-2, dim2=-1):
 """
 :alias_main: paddle.nn.functional.diag_embed
 :alias: paddle.nn.functional.diag_embed,paddle.nn.functional.extension.diag_embed
 This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
 are filled by ``input``. By default, a 2D plane formed by the last two dimensions
@@ -168,8 +176,8 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
 @templatedoc()
 def row_conv(input, weight, act=None):
 """
 :alias_main: paddle.nn.functional.row_conv
 :alias: paddle.nn.functional.row_conv,paddle.nn.functional.extension.row_conv
 ${comment}
...
@@ -31,22 +31,27 @@ from ...fluid.layers import square_error_cost #DEFINE_ALIAS
 from ...fluid.layers import ssd_loss #DEFINE_ALIAS
 from ...fluid.layers import teacher_student_sigmoid_loss #DEFINE_ALIAS
+from ...fluid.layers import edit_distance #DEFINE_ALIAS
+from ...fluid.layers import huber_loss #DEFINE_ALIAS
+from ...fluid.layers import margin_rank_loss #DEFINE_ALIAS
+from ...fluid.layers import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
 __all__ = [
 'bpr_loss',
 'center_loss',
 'cross_entropy',
 'dice_loss',
-# 'edit_distance',
+'edit_distance',
-# 'huber_loss',
+'huber_loss',
 'iou_similarity',
 'kldiv_loss',
 'log_loss',
-# 'margin_rank_loss',
+'margin_rank_loss',
 'mse_loss',
 # 'nce',
 'npair_loss',
 'rank_loss',
-# 'sampled_softmax_with_cross_entropy',
+'sampled_softmax_with_cross_entropy',
 'sigmoid_cross_entropy_with_logits',
 'sigmoid_focal_loss',
 'smooth_l1',
...
@@ -42,6 +42,14 @@ from ...fluid.layers import space_to_depth #DEFINE_ALIAS
 from ...fluid.layers import yolo_box #DEFINE_ALIAS
 from ...fluid.layers import yolov3_loss #DEFINE_ALIAS
+from ...fluid.layers import fsp_matrix #DEFINE_ALIAS
+from ...fluid.layers import image_resize_short #DEFINE_ALIAS
+from ...fluid.layers import pixel_shuffle #DEFINE_ALIAS
+from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS
+from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS
+from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS
+from ...fluid.layers import shuffle_channel #DEFINE_ALIAS
 __all__ = [
 'affine_channel',
 'affine_grid',
@@ -56,27 +64,27 @@ __all__ = [
 'density_prior_box',
 'detection_output',
 'distribute_fpn_proposals',
-# 'fsp_matrix',
+'fsp_matrix',
 'generate_mask_labels',
 'generate_proposal_labels',
 'generate_proposals',
 'grid_sampler',
 'image_resize',
-# 'image_resize_short',
+'image_resize_short',
 # 'multi_box_head',
-# 'pixel_shuffle',
+'pixel_shuffle',
 'prior_box',
 'prroi_pool',
 'psroi_pool',
 'resize_bilinear',
 'resize_nearest',
 'resize_trilinear',
-# 'retinanet_detection_output',
+'retinanet_detection_output',
-# 'retinanet_target_assign',
+'retinanet_target_assign',
 'roi_align',
-# 'roi_perspective_transform',
+'roi_perspective_transform',
 'roi_pool',
-# 'shuffle_channel',
+'shuffle_channel',
 'space_to_depth',
 'yolo_box',
 'yolov3_loss'
...
@@ -31,8 +31,8 @@ from .. import functional
 class HSigmoid(layers.Layer):
 """
 :alias_main: paddle.nn.HSigmoid
 :alias: paddle.nn.HSigmoid,paddle.nn.layer.HSigmoid,paddle.nn.layer.activation.HSigmoid
 Hierarchical Sigmoid Layer.
@@ -167,8 +167,8 @@ class HSigmoid(layers.Layer):
 class ReLU(layers.Layer):
 """
 :alias_main: paddle.nn.ReLU
 :alias: paddle.nn.ReLU,paddle.nn.layer.ReLU,paddle.nn.layer.activation.ReLU
 ReLU Activation.
@@ -209,8 +209,8 @@ class ReLU(layers.Layer):
 class Sigmoid(layers.Layer):
 """
 :alias_main: paddle.nn.Sigmoid
 :alias: paddle.nn.Sigmoid,paddle.nn.layer.Sigmoid,paddle.nn.layer.activation.Sigmoid
 Sigmoid Activation.
@@ -254,8 +254,8 @@ class Sigmoid(layers.Layer):
 class LogSoftmax(layers.Layer):
 """
 :alias_main: paddle.nn.LogSoftmax
 :alias: paddle.nn.LogSoftmax,paddle.nn.layer.LogSoftmax,paddle.nn.layer.activation.LogSoftmax
 This operator implements the log_softmax layer. The calculation process is as follows:
...
@@ -40,8 +40,8 @@ def _get_default_param_initializer(num_channels, filter_size):
 class Conv2D(layers.Layer):
 """
 :alias_main: paddle.nn.Conv2D
 :alias: paddle.nn.Conv2D,paddle.nn.layer.Conv2D,paddle.nn.layer.conv.Conv2D
 This interface is used to construct a callable object of the ``Conv2D`` class.
 For more details, refer to code examples.
@@ -238,8 +238,8 @@ class Conv2D(layers.Layer):
 class Conv2DTranspose(layers.Layer):
 """
 :alias_main: paddle.nn.Conv2DTranspose
 :alias: paddle.nn.Conv2DTranspose,paddle.nn.layer.Conv2DTranspose,paddle.nn.layer.conv.Conv2DTranspose
 This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
 For more details, refer to code examples.
@@ -437,8 +437,8 @@ class Conv2DTranspose(layers.Layer):
 class Conv3D(layers.Layer):
 """
 :alias_main: paddle.nn.Conv3D
 :alias: paddle.nn.Conv3D,paddle.nn.layer.Conv3D,paddle.nn.layer.conv.Conv3D
 **Convlution3D Layer**
@@ -630,8 +630,8 @@ class Conv3D(layers.Layer):
 class Conv3DTranspose(layers.Layer):
 """
 :alias_main: paddle.nn.Conv3DTranspose
 :alias: paddle.nn.Conv3DTranspose,paddle.nn.layer.Conv3DTranspose,paddle.nn.layer.conv.Conv3DTranspose
 **Convlution3D transpose layer**
...
@@ -20,8 +20,8 @@ from .. import functional as F
 class RowConv(layers.Layer):
 """
 :alias_main: paddle.nn.RowConv
 :alias: paddle.nn.RowConv,paddle.nn.layer.RowConv,paddle.nn.layer.extension.RowConv
 **Row-convolution operator**
...
@@ -28,8 +28,8 @@ __all__ = [
 class CrossEntropyLoss(fluid.dygraph.Layer):
 """
 :alias_main: paddle.nn.CrossEntropyLoss
 :alias: paddle.nn.CrossEntropyLoss,paddle.nn.layer.CrossEntropyLoss,paddle.nn.layer.loss.CrossEntropyLoss
 This operator implements the cross entropy loss function. This OP combines ``LogSoftmax``,
 and ``NLLLoss`` together.
@@ -146,8 +146,8 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
 class MSELoss(fluid.dygraph.layers.Layer):
 """
 :alias_main: paddle.nn.MSELoss
 :alias: paddle.nn.MSELoss,paddle.nn.layer.MSELoss,paddle.nn.layer.loss.MSELoss
 **Mean Square Error Loss**
 Computes the mean square error (squared L2 norm) of given input and label.
@@ -250,8 +250,8 @@ class MSELoss(fluid.dygraph.layers.Layer):
 class L1Loss(fluid.dygraph.Layer):
 """
 :alias_main: paddle.nn.L1Loss
 :alias: paddle.nn.L1Loss,paddle.nn.layer.L1Loss,paddle.nn.layer.loss.L1Loss
 This interface is used to construct a callable object of the ``L1Loss`` class.
 The L1Loss layer calculates the L1 Loss of input predictions and target
@@ -340,8 +340,8 @@ class L1Loss(fluid.dygraph.Layer):
 class BCELoss(fluid.dygraph.Layer):
 """
 :alias_main: paddle.nn.BCELoss
 :alias: paddle.nn.BCELoss,paddle.nn.layer.BCELoss,paddle.nn.layer.loss.BCELoss
 This interface is used to construct a callable object of the ``BCELoss`` class.
 The BCELoss layer measures the binary_cross_entropy loss between input predictions
@@ -468,8 +468,8 @@ class BCELoss(fluid.dygraph.Layer):
 class NLLLoss(fluid.dygraph.Layer):
 """
 :alias_main: paddle.nn.NLLLoss
 :alias: paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss,paddle.nn.layer.loss.NLLLoss
 This op accepts input and target label and returns negative log likelihood
 cross error. It is useful to train a classification problem with C classes.
...
@@ -22,7 +22,7 @@ from __future__ import print_function
 from .random import randperm
 from .attribute import rank #DEFINE_ALIAS
 from .attribute import shape #DEFINE_ALIAS
-# from .creation import create_tensor #DEFINE_ALIAS
+from .creation import create_tensor #DEFINE_ALIAS
 # from .creation import create_lod_tensor #DEFINE_ALIAS
 # from .creation import create_random_int_lodtensor #DEFINE_ALIAS
 from .creation import crop_tensor #DEFINE_ALIAS
@@ -78,13 +78,13 @@ from .manipulation import expand #DEFINE_ALIAS
 from .manipulation import expand_as #DEFINE_ALIAS
 from .manipulation import flatten #DEFINE_ALIAS
 from .manipulation import gather #DEFINE_ALIAS
-# from .manipulation import gather_nd #DEFINE_ALIAS
+from .manipulation import gather_nd #DEFINE_ALIAS
 from .manipulation import reshape #DEFINE_ALIAS
 from .manipulation import reverse #DEFINE_ALIAS
 from .manipulation import scatter #DEFINE_ALIAS
-# from .manipulation import scatter_nd_add #DEFINE_ALIAS
+from .manipulation import scatter_nd_add #DEFINE_ALIAS
-# from .manipulation import scatter_nd #DEFINE_ALIAS
+from .manipulation import scatter_nd #DEFINE_ALIAS
-# from .manipulation import shard_index #DEFINE_ALIAS
+from .manipulation import shard_index #DEFINE_ALIAS
 from .manipulation import slice #DEFINE_ALIAS
 from .manipulation import split #DEFINE_ALIAS
 from .manipulation import squeeze #DEFINE_ALIAS
@@ -92,11 +92,11 @@ from .manipulation import stack #DEFINE_ALIAS
 from .manipulation import strided_slice #DEFINE_ALIAS
 from .manipulation import transpose #DEFINE_ALIAS
 from .manipulation import unique #DEFINE_ALIAS
-# from .manipulation import unique_with_counts #DEFINE_ALIAS
+from .manipulation import unique_with_counts #DEFINE_ALIAS
 from .manipulation import unsqueeze #DEFINE_ALIAS
 from .manipulation import unstack #DEFINE_ALIAS
 from .manipulation import flip #DEFINE_ALIAS
-# from .manipulation import unbind #DEFINE_ALIAS
+from .manipulation import unbind #DEFINE_ALIAS
 from .manipulation import roll #DEFINE_ALIAS
 from .math import abs #DEFINE_ALIAS
 from .math import acos #DEFINE_ALIAS
@@ -116,10 +116,10 @@ from .math import elementwise_pow #DEFINE_ALIAS
 from .math import elementwise_sub #DEFINE_ALIAS
 from .math import exp #DEFINE_ALIAS
 from .math import floor #DEFINE_ALIAS
-# from .math import increment #DEFINE_ALIAS
+from .math import increment #DEFINE_ALIAS
 from .math import log #DEFINE_ALIAS
 from .math import mul #DEFINE_ALIAS
-# from .math import multiplex #DEFINE_ALIAS
+from .math import multiplex #DEFINE_ALIAS
 from .math import pow #DEFINE_ALIAS
 from .math import reciprocal #DEFINE_ALIAS
 from .math import reduce_max #DEFINE_ALIAS
@@ -135,7 +135,7 @@ from .math import sqrt #DEFINE_ALIAS
 from .math import square #DEFINE_ALIAS
 from .math import stanh #DEFINE_ALIAS
 from .math import sum #DEFINE_ALIAS
-# from .math import sums #DEFINE_ALIAS
+from .math import sums #DEFINE_ALIAS
 from .math import tanh #DEFINE_ALIAS
 from .math import elementwise_sum #DEFINE_ALIAS
 from .math import max #DEFINE_ALIAS
@@ -151,7 +151,6 @@ from .math import erf #DEFINE_ALIAS
 from .math import addcmul #DEFINE_ALIAS
 from .math import addmm #DEFINE_ALIAS
 from .math import clamp #DEFINE_ALIAS
-from .manipulation import unbind #DEFINE_ALIAS
 from .math import trace #DEFINE_ALIAS
 from .math import kron #DEFINE_ALIAS
 # from .random import gaussin #DEFINE_ALIAS
...
@@ -28,8 +28,10 @@ from ..fluid.layers import diag #DEFINE_ALIAS
 from ..fluid.layers import eye #DEFINE_ALIAS
 from ..fluid.layers import fill_constant #DEFINE_ALIAS
+from ..fluid.layers import create_tensor #DEFINE_ALIAS
 __all__ = [
-# 'create_tensor',
+'create_tensor',
 # 'create_lod_tensor',
 # 'create_random_int_lodtensor',
 'crop_tensor',
@@ -60,8 +62,8 @@ def full_like(input,
 stop_gradient=True,
 name=None):
 """
 :alias_main: paddle.full_like
 :alias: paddle.full_like,paddle.tensor.full_like,paddle.tensor.creation.full_like
 **full_like**
 This function creates a tensor filled with `fill_value` which has identical shape and dtype
@@ -121,8 +123,8 @@ def full_like(input,
 def linspace(start, stop, num, dtype, out=None, device=None, name=None):
 """
 :alias_main: paddle.linspace
 :alias: paddle.linspace,paddle.tensor.linspace,paddle.tensor.creation.linspace
 This OP return fixed number of evenly spaced values within a given interval.
@@ -210,8 +212,8 @@ def linspace(start, stop, num, dtype, out=None, device=None, name=None):
 def ones(shape, dtype=None, out=None, device=None):
 """
 :alias_main: paddle.ones
 :alias: paddle.ones,paddle.tensor.ones,paddle.tensor.creation.ones
 The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
@@ -252,8 +254,8 @@ def ones(shape, dtype=None, out=None, device=None):
 def ones_like(input, dtype=None, device=None, name=None):
 """
 :alias_main: paddle.ones_like
 :alias: paddle.ones_like,paddle.tensor.ones_like,paddle.tensor.creation.ones_like
 This function creates a ones tensor which has identical shape and dtype
 with `input`.
@@ -322,8 +324,8 @@ def ones_like(input, dtype=None, device=None, name=None):
 def zeros(shape, dtype, out=None, device=None):
 """
 :alias_main: paddle.zeros
 :alias: paddle.zeros,paddle.tensor.zeros,paddle.tensor.creation.zeros
 The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
@@ -364,8 +366,8 @@ def zeros(shape, dtype, out=None, device=None):
 def zeros_like(input, dtype=None, device=None, name=None):
 """
 :alias_main: paddle.zeros_like
 :alias: paddle.zeros_like,paddle.tensor.zeros_like,paddle.tensor.creation.zeros_like
 This function creates a zeros tensor which has identical shape and dtype
 with `input`.
@@ -503,8 +505,8 @@ def full(shape,
 stop_gradient=True,
 name=None):
 """
 :alias_main: paddle.full
 :alias: paddle.full,paddle.tensor.full,paddle.tensor.creation.full
 This Op return a Tensor with the `fill_value` which size is same as `shape`
@@ -583,8 +585,8 @@ def full(shape,
 def arange(start, end, step=1, dtype=None, name=None):
 """
 :alias_main: paddle.arange
 :alias: paddle.arange,paddle.tensor.arange,paddle.tensor.creation.arange
 Return evenly spaced values within a given interval.
@@ -690,8 +692,8 @@ def _tril_triu_op(helper):
 def tril(input, diagonal=0, name=None):
 """
 :alias_main: paddle.tril
 :alias: paddle.tril,paddle.tensor.tril,paddle.tensor.creation.tril
 This op returns the lower triangular part of a matrix (2-D tensor) or batch
 of matrices :attr:`input`, the other elements of the result tensor are set
@@ -767,8 +769,8 @@ def tril(input, diagonal=0, name=None):
 def triu(input, diagonal=0, name=None):
 """
 :alias_main: paddle.triu
 :alias: paddle.triu,paddle.tensor.triu,paddle.tensor.creation.triu
 This op returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
 :attr:`input`, the other elements of the result tensor are set to 0.
@@ -844,8 +846,8 @@ def triu(input, diagonal=0, name=None):
 def meshgrid(input, name=None):
 """
 :alias_main: paddle.meshgrid
 :alias: paddle.meshgrid,paddle.tensor.meshgrid,paddle.tensor.creation.meshgrid
 This op takes a list of N tensors as input, each of which is 1-dimensional
 vector, and creates N-dimensional grids.
...
@@ -36,8 +36,8 @@ __all__ = [
 def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
 """
 :alias_main: paddle.matmul
 :alias: paddle.matmul,paddle.tensor.matmul,paddle.tensor.linalg.matmul
 Applies matrix multiplication to two tensors.
@@ -172,8 +172,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
 def norm(input, p='fro', axis=None, keepdim=False, out=None, name=None):
 """
 :alias_main: paddle.norm
 :alias: paddle.norm,paddle.tensor.norm,paddle.tensor.linalg.norm
 Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
 or 2-norm, and in general the p-norm for p > 0) of a given tensor.
@@ -350,8 +350,8 @@ def norm(input, p='fro', axis=None, keepdim=False, out=None, name=None):
 def dist(x, y, p=2):
 """
 :alias_main: paddle.dist
 :alias: paddle.dist,paddle.tensor.dist,paddle.tensor.linalg.dist
 This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure
 of distance. The shapes of x and y must be broadcastable. The definition is as follows, for
@@ -451,8 +451,8 @@ def dist(x, y, p=2):
 def dot(x, y, name=None):
 """
 :alias_main: paddle.dot
 :alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot
 This operator calculates inner product for vectors.
@@ -510,8 +510,8 @@ def dot(x, y, name=None):
 def t(input, name=None):
 """
 :alias_main: paddle.t
 :alias: paddle.t,paddle.tensor.t,paddle.tensor.linalg.t
 Transpose <=2-D tensor.
 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to
@@ -584,8 +584,8 @@ def t(input, name=None):
 def cross(input, other, dim=None):
 """
 :alias_main: paddle.cross
 :alias: paddle.cross,paddle.tensor.cross,paddle.tensor.linalg.cross
 Returns the cross product of vectors in dimension `dim` of the `input` and `other` tensor.
 Inputs must have the same shape, and the size of their dim-th dimension should be equla to 3.
@@ -649,8 +649,8 @@ def cross(input, other, dim=None):
 def cholesky(x, upper=False):
 """
 :alias_main: paddle.cholesky
 :alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky
 Computes the Cholesky decomposition of one symmetric positive-definite
 matrix or batches of symmetric positive-definite matrice.
@@ -705,8 +705,8 @@ def cholesky(x, upper=False):
 def bmm(x, y, name=None):
 """
 :alias_main: paddle.bmm
 :alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm
 Applies batched matrix multiplication to two tensors.
...
...@@ -54,8 +54,8 @@ __all__ = [ ...@@ -54,8 +54,8 @@ __all__ = [
def equal(x, y, axis=-1, name=None): def equal(x, y, axis=-1, name=None):
""" """
:alias_main: paddle.equal :alias_main: paddle.equal
:alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal :alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal
This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise. This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
...@@ -125,8 +125,8 @@ def equal(x, y, axis=-1, name=None): ...@@ -125,8 +125,8 @@ def equal(x, y, axis=-1, name=None):
@templatedoc() @templatedoc()
def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
""" """
:alias_main: paddle.allclose :alias_main: paddle.allclose
:alias: paddle.allclose,paddle.tensor.allclose,paddle.tensor.logic.allclose :alias: paddle.allclose,paddle.tensor.allclose,paddle.tensor.logic.allclose
${comment} ${comment}
...@@ -210,8 +210,8 @@ def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): ...@@ -210,8 +210,8 @@ def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
def elementwise_equal(x, y, name=None): def elementwise_equal(x, y, name=None):
""" """
:alias_main: paddle.elementwise_equal :alias_main: paddle.elementwise_equal
:alias: paddle.elementwise_equal,paddle.tensor.elementwise_equal,paddle.tensor.logic.elementwise_equal :alias: paddle.elementwise_equal,paddle.tensor.elementwise_equal,paddle.tensor.logic.elementwise_equal
This layer returns the truth value of :math:`x == y` elementwise. This layer returns the truth value of :math:`x == y` elementwise.
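A short sketch of the element-wise variant, under the same dygraph assumption as the other examples here:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([1, 2, 3], dtype='int32'))
        y = fluid.dygraph.to_variable(np.array([1, 0, 3], dtype='int32'))
        out = paddle.elementwise_equal(x, y)  # element-wise truth values: match, mismatch, match
        print(out.numpy())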
......
...@@ -36,40 +36,25 @@ from ..fluid.layers import transpose #DEFINE_ALIAS ...@@ -36,40 +36,25 @@ from ..fluid.layers import transpose #DEFINE_ALIAS
from ..fluid.layers import unique #DEFINE_ALIAS from ..fluid.layers import unique #DEFINE_ALIAS
from ..fluid.layers import unstack #DEFINE_ALIAS from ..fluid.layers import unstack #DEFINE_ALIAS
from ..fluid.layers import gather_nd #DEFINE_ALIAS
from ..fluid.layers import scatter_nd_add #DEFINE_ALIAS
from ..fluid.layers import scatter_nd #DEFINE_ALIAS
from ..fluid.layers import shard_index #DEFINE_ALIAS
from ..fluid.layers import unique_with_counts #DEFINE_ALIAS
__all__ = [ __all__ = [
'cast', 'cast', 'concat', 'expand', 'expand_as', 'flatten', 'gather', 'gather_nd',
'concat', 'reshape', 'reverse', 'scatter', 'scatter_nd_add', 'scatter_nd',
'expand', 'shard_index', 'slice', 'split', 'squeeze', 'stack', 'strided_slice',
'expand_as', 'transpose', 'unique', 'unique_with_counts', 'unsqueeze', 'unstack', 'flip',
'flatten', 'unbind', 'roll'
'gather',
# 'gather_nd',
'reshape',
'reverse',
'scatter',
# 'scatter_nd_add',
# 'scatter_nd',
# 'shard_index',
'slice',
'split',
'squeeze',
'stack',
'strided_slice',
'transpose',
'unique',
# 'unique_with_counts',
'unsqueeze',
'unstack',
'flip',
'unbind',
'roll'
] ]
def flip(input, dims, name=None): def flip(input, dims, name=None):
""" """
:alias_main: paddle.flip :alias_main: paddle.flip
:alias: paddle.flip,paddle.tensor.flip,paddle.tensor.manipulation.flip :alias: paddle.flip,paddle.tensor.flip,paddle.tensor.manipulation.flip
Reverse the order of an n-D tensor along the given axes in dims. Reverse the order of an n-D tensor along the given axes in dims.
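A minimal sketch of flip with the `dims` argument from the signature above (dygraph harness assumed):
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.arange(6).reshape(2, 3).astype('float32'))
        out = paddle.flip(x, dims=[0])  # reverse along axis 0: the two rows swap places
        print(out.numpy())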
...@@ -121,8 +106,8 @@ def flip(input, dims, name=None): ...@@ -121,8 +106,8 @@ def flip(input, dims, name=None):
def roll(input, shifts, dims=None): def roll(input, shifts, dims=None):
""" """
:alias_main: paddle.roll :alias_main: paddle.roll
:alias: paddle.roll,paddle.tensor.roll,paddle.tensor.manipulation.roll :alias: paddle.roll,paddle.tensor.roll,paddle.tensor.manipulation.roll
Roll the `input` tensor along the given dimension(s). Elements that are shifted beyond Roll the `input` tensor along the given dimension(s). Elements that are shifted beyond
the last position are re-introduced at the first position. If a dimension is not specified, the last position are re-introduced at the first position. If a dimension is not specified,
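A small roll sketch showing the wrap-around behaviour described above (harness assumed, as elsewhere in these notes):
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.arange(6).reshape(2, 3).astype('float32'))
        out = paddle.roll(x, shifts=1, dims=[1])  # shift each row right by one, wrapping around
        print(out.numpy())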
...@@ -195,8 +180,8 @@ def roll(input, shifts, dims=None): ...@@ -195,8 +180,8 @@ def roll(input, shifts, dims=None):
def stack(x, axis=0, out=None, name=None): def stack(x, axis=0, out=None, name=None):
""" """
:alias_main: paddle.stack :alias_main: paddle.stack
:alias: paddle.stack,paddle.tensor.stack,paddle.tensor.manipulation.stack :alias: paddle.stack,paddle.tensor.stack,paddle.tensor.manipulation.stack
This OP stacks all the input tensors in :code:`x` along axis. This OP stacks all the input tensors in :code:`x` along axis.
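A minimal stack sketch, passing a Python list of tensors as :code:`x` (dygraph harness assumed):
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        a = fluid.dygraph.to_variable(np.array([1.0, 2.0], dtype='float32'))
        b = fluid.dygraph.to_variable(np.array([3.0, 4.0], dtype='float32'))
        out = paddle.stack([a, b], axis=0)  # two (2,) tensors stacked into shape (2, 2)
        print(out.numpy())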
...@@ -308,8 +293,8 @@ def stack(x, axis=0, out=None, name=None): ...@@ -308,8 +293,8 @@ def stack(x, axis=0, out=None, name=None):
def split(input, num_or_sections, dim=-1, name=None): def split(input, num_or_sections, dim=-1, name=None):
""" """
:alias_main: paddle.split :alias_main: paddle.split
:alias: paddle.split,paddle.tensor.split,paddle.tensor.manipulation.split :alias: paddle.split,paddle.tensor.split,paddle.tensor.manipulation.split
Split the input tensor into multiple sub-Tensors. Split the input tensor into multiple sub-Tensors.
Args: Args:
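A hedged split sketch using the num_or_sections/dim parameters from the signature above:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.random.rand(3, 6).astype('float32'))
        s0, s1, s2 = paddle.split(x, num_or_sections=3, dim=1)  # three (3, 2) sub-tensors
        print(s0.numpy().shape)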
...@@ -447,8 +432,8 @@ def split(input, num_or_sections, dim=-1, name=None): ...@@ -447,8 +432,8 @@ def split(input, num_or_sections, dim=-1, name=None):
def squeeze(input, axes, out=None, name=None): def squeeze(input, axes, out=None, name=None):
""" """
:alias_main: paddle.squeeze :alias_main: paddle.squeeze
:alias: paddle.squeeze,paddle.tensor.squeeze,paddle.tensor.manipulation.squeeze :alias: paddle.squeeze,paddle.tensor.squeeze,paddle.tensor.manipulation.squeeze
This OP squeezes single-dimensional entries from the input tensor's shape. If axes is provided, it will This OP squeezes single-dimensional entries from the input tensor's shape. If axes is provided, it will
remove only the dims selected by axes, and those dims must be of size one. If axes is not provided, all dims equal remove only the dims selected by axes, and those dims must be of size one. If axes is not provided, all dims equal
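A short squeeze sketch (dygraph harness assumed, as in the other examples):
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.random.rand(1, 3, 1, 5).astype('float32'))
        out = paddle.squeeze(x, axes=[0, 2])  # drop the size-1 dims at axes 0 and 2 -> shape (3, 5)
        print(out.numpy().shape)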
...@@ -526,8 +511,8 @@ def squeeze(input, axes, out=None, name=None): ...@@ -526,8 +511,8 @@ def squeeze(input, axes, out=None, name=None):
def unsqueeze(input, axes, out=None, name=None): def unsqueeze(input, axes, out=None, name=None):
""" """
:alias_main: paddle.unsqueeze :alias_main: paddle.unsqueeze
:alias: paddle.unsqueeze,paddle.tensor.unsqueeze,paddle.tensor.manipulation.unsqueeze :alias: paddle.unsqueeze,paddle.tensor.unsqueeze,paddle.tensor.manipulation.unsqueeze
Insert single-dimensional entries into the shape of a Tensor. Takes one Insert single-dimensional entries into the shape of a Tensor. Takes one
required argument axes, a list of dimensions that will be inserted. required argument axes, a list of dimensions that will be inserted.
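The inverse operation, unsqueeze, sketched under the same assumptions:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.random.rand(3, 4).astype('float32'))
        out = paddle.unsqueeze(x, axes=[0, 2])  # insert size-1 dims -> shape (1, 3, 1, 4)
        print(out.numpy().shape)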
...@@ -609,8 +594,8 @@ def unsqueeze(input, axes, out=None, name=None): ...@@ -609,8 +594,8 @@ def unsqueeze(input, axes, out=None, name=None):
def gather(input, index, overwrite=True): def gather(input, index, overwrite=True):
""" """
:alias_main: paddle.gather :alias_main: paddle.gather
:alias: paddle.gather,paddle.tensor.gather,paddle.tensor.manipulation.gather :alias: paddle.gather,paddle.tensor.gather,paddle.tensor.manipulation.gather
**Gather Layer** **Gather Layer**
...@@ -683,8 +668,8 @@ def gather(input, index, overwrite=True): ...@@ -683,8 +668,8 @@ def gather(input, index, overwrite=True):
def unbind(input, axis=0): def unbind(input, axis=0):
""" """
:alias_main: paddle.tensor.unbind :alias_main: paddle.tensor.unbind
:alias: paddle.tensor.unbind,paddle.tensor.manipulation.unbind :alias: paddle.tensor.unbind,paddle.tensor.manipulation.unbind
Removes a tensor dimension, then splits the input tensor into multiple sub-Tensors. Removes a tensor dimension, then splits the input tensor into multiple sub-Tensors.
Args: Args:
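A minimal unbind sketch; the unpacking into separate tensors reflects the "multiple sub-Tensors" wording above, the rest is assumed:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.random.rand(2, 3).astype('float32'))
        t0, t1 = paddle.unbind(x, axis=0)  # axis 0 removed: two tensors of shape (3,)
        print(t0.numpy().shape)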
......
...@@ -58,6 +58,10 @@ from ..fluid.layers import stanh #DEFINE_ALIAS ...@@ -58,6 +58,10 @@ from ..fluid.layers import stanh #DEFINE_ALIAS
from ..fluid.layers import atan #DEFINE_ALIAS from ..fluid.layers import atan #DEFINE_ALIAS
from ..fluid.layers import erf #DEFINE_ALIAS from ..fluid.layers import erf #DEFINE_ALIAS
from ..fluid.layers import increment #DEFINE_ALIAS
from ..fluid.layers import multiplex #DEFINE_ALIAS
from ..fluid.layers import sums #DEFINE_ALIAS
__all__ = [ __all__ = [
'abs', 'abs',
'acos', 'acos',
...@@ -77,10 +81,10 @@ __all__ = [ ...@@ -77,10 +81,10 @@ __all__ = [
'elementwise_sub', 'elementwise_sub',
'exp', 'exp',
'floor', 'floor',
# 'increment', 'increment',
'log', 'log',
'mul', 'mul',
# 'multiplex', 'multiplex',
'pow', 'pow',
'reciprocal', 'reciprocal',
'reduce_max', 'reduce_max',
...@@ -96,7 +100,7 @@ __all__ = [ ...@@ -96,7 +100,7 @@ __all__ = [
'square', 'square',
'stanh', 'stanh',
'sum', 'sum',
# 'sums', 'sums',
'tanh', 'tanh',
'elementwise_sum', 'elementwise_sum',
'max', 'max',
...@@ -116,6 +120,7 @@ __all__ = [ ...@@ -116,6 +120,7 @@ __all__ = [
'kron' 'kron'
] ]
# yapf: enable. # yapf: enable.
......
...@@ -47,8 +47,8 @@ def randint(low, ...@@ -47,8 +47,8 @@ def randint(low,
seed=0, seed=0,
name=None): name=None):
""" """
:alias_main: paddle.randint :alias_main: paddle.randint
:alias: paddle.randint,paddle.tensor.randint,paddle.tensor.random.randint :alias: paddle.randint,paddle.tensor.randint,paddle.tensor.random.randint
This function returns a Tensor filled with random integers from the "discrete uniform" distribution of the This function returns a Tensor filled with random integers from the "discrete uniform" distribution of the
specified data type in the interval [low, high). If high is None (the default), then results are from [0, low). specified data type in the interval [low, high). If high is None (the default), then results are from [0, low).
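A hedged randint sketch; the low/high semantics come from the text above, while the `shape` keyword and imperative execution are assumptions about this 2.0-alpha API:
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        out = paddle.randint(low=0, high=10, shape=[2, 3])  # integers drawn from [0, 10)
        print(out.numpy())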
...@@ -213,8 +213,8 @@ def randn(shape, ...@@ -213,8 +213,8 @@ def randn(shape,
stop_gradient=True, stop_gradient=True,
name=None): name=None):
""" """
:alias_main: paddle.randn :alias_main: paddle.randn
:alias: paddle.randn,paddle.tensor.randn,paddle.tensor.random.randn :alias: paddle.randn,paddle.tensor.randn,paddle.tensor.random.randn
This function returns a tensor filled with random numbers from a normal This function returns a tensor filled with random numbers from a normal
distribution with mean 0 and variance 1 (also called the standard normal distribution with mean 0 and variance 1 (also called the standard normal
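A minimal randn sketch, assuming the op runs in imperative mode at this revision (otherwise the same call can be placed in a static program):
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        out = paddle.randn(shape=[2, 3])  # standard normal samples, shape (2, 3)
        print(out.numpy())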
...@@ -324,8 +324,8 @@ def randperm(n, ...@@ -324,8 +324,8 @@ def randperm(n,
stop_gradient=True, stop_gradient=True,
seed=0): seed=0):
""" """
:alias_main: paddle.randperm :alias_main: paddle.randperm
:alias: paddle.randperm,paddle.tensor.randperm,paddle.tensor.random.randperm :alias: paddle.randperm,paddle.tensor.randperm,paddle.tensor.random.randperm
${comment} ${comment}
...@@ -408,8 +408,8 @@ def randperm(n, ...@@ -408,8 +408,8 @@ def randperm(n,
def rand(shape, out=None, dtype=None, device=None, stop_gradient=True): def rand(shape, out=None, dtype=None, device=None, stop_gradient=True):
""" """
:alias_main: paddle.rand :alias_main: paddle.rand
:alias: paddle.rand,paddle.tensor.rand,paddle.tensor.random.rand :alias: paddle.rand,paddle.tensor.rand,paddle.tensor.random.rand
This OP initializes a variable with random values sampled from a This OP initializes a variable with random values sampled from a
uniform distribution in the range [0, 1). uniform distribution in the range [0, 1).
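A matching sketch for rand, under the same execution assumption:
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        out = paddle.rand(shape=[2, 3])  # uniform samples in [0, 1)
        print(out.numpy())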
......
...@@ -44,8 +44,8 @@ from paddle.common_ops_import import * ...@@ -44,8 +44,8 @@ from paddle.common_ops_import import *
def argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None): def argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None):
""" """
:alias_main: paddle.argmax :alias_main: paddle.argmax
:alias: paddle.argmax,paddle.tensor.argmax,paddle.tensor.search.argmax :alias: paddle.argmax,paddle.tensor.argmax,paddle.tensor.search.argmax
This OP computes the indices of the maximum values of the input tensor's This OP computes the indices of the maximum values of the input tensor's
elements along the provided axis. elements along the provided axis.
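A short argmax sketch (dygraph harness assumed):
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[5, 2, 9], [1, 7, 3]], dtype='float32'))
        out = paddle.argmax(x, axis=1)  # index of the maximum in each row: 2 and 1
        print(out.numpy())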
...@@ -137,8 +137,8 @@ def argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None): ...@@ -137,8 +137,8 @@ def argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None):
def index_select(input, index, dim=0): def index_select(input, index, dim=0):
""" """
:alias_main: paddle.index_select :alias_main: paddle.index_select
:alias: paddle.index_select,paddle.tensor.index_select,paddle.tensor.search.index_select :alias: paddle.index_select,paddle.tensor.index_select,paddle.tensor.search.index_select
Returns a new tensor which indexes the `input` tensor along dimension `dim` using Returns a new tensor which indexes the `input` tensor along dimension `dim` using
the entries in `index` which is a Tensor. The returned tensor has the same number the entries in `index` which is a Tensor. The returned tensor has the same number
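A minimal index_select sketch, with the index dtype chosen as an assumption:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.arange(12).reshape(3, 4).astype('float32'))
        idx = fluid.dygraph.to_variable(np.array([0, 2], dtype='int64'))  # int64 index assumed supported
        out = paddle.index_select(x, idx, dim=0)  # rows 0 and 2 -> shape (2, 4)
        print(out.numpy())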
...@@ -201,8 +201,8 @@ def index_select(input, index, dim=0): ...@@ -201,8 +201,8 @@ def index_select(input, index, dim=0):
def nonzero(input, as_tuple=False): def nonzero(input, as_tuple=False):
""" """
:alias_main: paddle.nonzero :alias_main: paddle.nonzero
:alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero :alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero
Return a tensor containing the indices of all non-zero elements of the `input` Return a tensor containing the indices of all non-zero elements of the `input`
tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
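A small nonzero sketch using the default as_tuple=False form:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[1, 0], [0, 3]], dtype='float32'))
        out = paddle.nonzero(x)  # coordinates of non-zero entries: (0, 0) and (1, 1)
        print(out.numpy())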
...@@ -288,8 +288,8 @@ def nonzero(input, as_tuple=False): ...@@ -288,8 +288,8 @@ def nonzero(input, as_tuple=False):
def sort(input, axis=-1, descending=False, out=None, name=None): def sort(input, axis=-1, descending=False, out=None, name=None):
""" """
:alias_main: paddle.sort :alias_main: paddle.sort
:alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort :alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort
This OP sorts the input along the given axis, and returns the sorted output This OP sorts the input along the given axis, and returns the sorted output
data Variable and its corresponding index Variable with the same shape as data Variable and its corresponding index Variable with the same shape as
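A hedged sort sketch; the two-value unpacking assumes the (data, index) pair described above:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[3, 1, 2]], dtype='float32'))
        out, indices = paddle.sort(x, axis=-1)  # sorted values and their original positions
        print(out.numpy(), indices.numpy())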
...@@ -380,8 +380,8 @@ def sort(input, axis=-1, descending=False, out=None, name=None): ...@@ -380,8 +380,8 @@ def sort(input, axis=-1, descending=False, out=None, name=None):
def where(condition, x, y, name=None): def where(condition, x, y, name=None):
""" """
:alias_main: paddle.where :alias_main: paddle.where
:alias: paddle.where,paddle.tensor.where,paddle.tensor.search.where :alias: paddle.where,paddle.tensor.where,paddle.tensor.search.where
Return a tensor of elements selected from either $x$ or $y$, depending on $condition$. Return a tensor of elements selected from either $x$ or $y$, depending on $condition$.
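A where sketch; building the boolean condition with fluid.layers.greater_than in dygraph is an assumption about this revision:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([0.5, 1.5, 2.5], dtype='float32'))
        y = fluid.dygraph.to_variable(np.zeros(3, dtype='float32'))
        ones = fluid.dygraph.to_variable(np.ones(3, dtype='float32'))
        cond = fluid.layers.greater_than(x, ones)      # mask: x > 1
        out = paddle.where(cond, x, y)                 # picks x where cond is True, y elsewhere
        print(out.numpy())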
...@@ -458,8 +458,8 @@ def where(condition, x, y, name=None): ...@@ -458,8 +458,8 @@ def where(condition, x, y, name=None):
def index_sample(x, index): def index_sample(x, index):
""" """
:alias_main: paddle.index_sample :alias_main: paddle.index_sample
:alias: paddle.index_sample,paddle.tensor.index_sample,paddle.tensor.search.index_sample :alias: paddle.index_sample,paddle.tensor.index_sample,paddle.tensor.search.index_sample
**IndexSample Layer** **IndexSample Layer**
......
...@@ -28,8 +28,8 @@ from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_t ...@@ -28,8 +28,8 @@ from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_t
def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None): def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
""" """
:alias_main: paddle.var :alias_main: paddle.var
:alias: paddle.var,paddle.tensor.var,paddle.tensor.stat.var :alias: paddle.var,paddle.tensor.var,paddle.tensor.stat.var
Computes the variance of the input Variable's elements along the specified Computes the variance of the input Variable's elements along the specified
axis. axis.
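A minimal var sketch (dygraph harness assumed):
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[1.0, 2.0], [3.0, 4.0]], dtype='float32'))
        out = paddle.var(x, axis=1)  # per-row variance; unbiased (divides by n-1) by default
        print(out.numpy())           # 0.5 for each row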
...@@ -107,8 +107,8 @@ def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None): ...@@ -107,8 +107,8 @@ def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None): def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
""" """
:alias_main: paddle.std :alias_main: paddle.std
:alias: paddle.std,paddle.tensor.std,paddle.tensor.stat.std :alias: paddle.std,paddle.tensor.std,paddle.tensor.stat.std
Computes the standard deviation of the input Variable's elements along the specified Computes the standard deviation of the input Variable's elements along the specified
axis. axis.
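And the matching std sketch, under the same assumptions:
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[1.0, 2.0], [3.0, 4.0]], dtype='float32'))
        out = paddle.std(x, axis=1)  # square root of the unbiased variance, about 0.7071 per row
        print(out.numpy())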
......