diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 81904a77829535aaab3e2153e93c1e8477f3b37d..295fec557f17b439bad074c633cc3e43b2acf844 100755
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -141,7 +141,7 @@ paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size',
 paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name', 'data_format'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None, 'NCDHW')), ('document', 'feff9c8ebb4d4d0be5345f9042f57c8e'))
 paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test', 'pad_value'], varargs=None, keywords=None, defaults=(False, 0.0)), ('document', '5a709f7ef3fdb8fc819d09dc4fbada9a'))
 paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'eaa9d0bbd3d4e017c8bc4ecdac483711'))
-paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '7ccaea1b93fe4f7387a6036692986c6b'))
+paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', 'f7d6a5173c92c23f9a25cbc58a0eb577'))
 paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCHW')), ('document', 'daf9ae55b2d54bd5f35acb397fd1e1b5'))
 paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive', 'data_format'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True, 'NCDHW')), ('document', 'df8edcb8dd020fdddf778c9f613dc650'))
 paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', 'd873fdd73bcd74f9203d347cfb90de75'))
@@ -187,7 +187,7 @@ paddle.fluid.layers.multiplex (ArgSpec(args=['inputs', 'index'], varargs=None, k
 paddle.fluid.layers.layer_norm (ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)), ('document', '678de6d6d0c93da74189990b039daae8'))
 paddle.fluid.layers.group_norm (ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None)), ('document', '87dd4b818f102bc1a780e1804c28bd38'))
 paddle.fluid.layers.spectral_norm (ArgSpec(args=['weight', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '7b3d14d6707d878923847ec617d7d521'))
-paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax', 'axis'], varargs=None, keywords=None, defaults=(False, -100, True, False, -1)), ('document', '54e1675aa0364f4a78fa72804ec0f413'))
+paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax', 'axis'], varargs=None, keywords=None, defaults=(False, -100, True, False, -1)), ('document', '6992e4140d667fdf816d0617648b5c00'))
 paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'cbe8940643ac80ef75e1abdfbdb09e88'))
 paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth', 'allow_out_of_range'], varargs=None, keywords=None, defaults=(False,)), ('document', 'cdf5dc2078f1e20dc61dd0bec7e28a29'))
 paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', 'd016c137beb9a4528b7378b437d00151'))
@@ -221,7 +221,7 @@ paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=
 paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '02f668664e3bfc4df6c00d7363467140'))
 paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '32196a194f757b4da114a595a5bc6414'))
 paddle.fluid.layers.crop_tensor (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'd460aaf35afbbeb9beea4789aa6e4343'))
-paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '8eb36596bb43d7a907d3397c7aedbdb3'))
+paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6d49ba251e23f32cb09df54a851bb960'))
 paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '1a177f30e5013fae7ee6c45860cf4946'))
 paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '9af1926c06711eacef9e82d7a9e4d308'))
 paddle.fluid.layers.relu6 (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '538fc860b2a1734e118b94e4a1a3ee67'))
@@ -586,7 +586,7 @@ paddle.fluid.dygraph.Layer.sublayers (ArgSpec(args=['self', 'include_sublayers']
 paddle.fluid.dygraph.Layer.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.dygraph.__impl__ (ArgSpec(args=['func'], varargs=None, keywords=None, defaults=()), ('document', '75d1d3afccc8b39cdebf05cb1f5969f9'))
 paddle.fluid.dygraph.guard (ArgSpec(args=['place'], varargs=None, keywords=None, defaults=(None,)), ('document', '7071320ffe2eec9aacdae574951278c6'))
-paddle.fluid.dygraph.to_variable (ArgSpec(args=['value', 'block', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '0e69fa3666f15dd01b6e3e270b9371cd'))
+paddle.fluid.dygraph.to_variable (ArgSpec(args=['value', 'block', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '7df6297d66295bdc933e3982caa6f1a8'))
 paddle.fluid.dygraph.Conv2D ('paddle.fluid.dygraph.nn.Conv2D', ('document', '10915f3c643e232d9c6789ce20a96869'))
 paddle.fluid.dygraph.Conv2D.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'dtype'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.dygraph.Conv2D.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
@@ -723,7 +723,7 @@ paddle.fluid.dygraph.LayerNorm.set_dict (ArgSpec(args=['self', 'stat_dict', 'inc
 paddle.fluid.dygraph.LayerNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
 paddle.fluid.dygraph.LayerNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
 paddle.fluid.dygraph.LayerNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.NCE ('paddle.fluid.dygraph.nn.NCE', ('document', '993aeea9be436e9c709a758795cb23e9'))
+paddle.fluid.dygraph.NCE ('paddle.fluid.dygraph.nn.NCE', ('document', '148e58ba1698e0cd60a3490fd4188d04'))
 paddle.fluid.dygraph.NCE.__init__ (ArgSpec(args=['self', 'name_scope', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, 'uniform', None, 0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.dygraph.NCE.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
 paddle.fluid.dygraph.NCE.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -740,7 +740,7 @@ paddle.fluid.dygraph.NCE.set_dict (ArgSpec(args=['self', 'stat_dict', 'include_s
 paddle.fluid.dygraph.NCE.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '9d689f44592cd22812c7ec06a9654eac'))
 paddle.fluid.dygraph.NCE.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
 paddle.fluid.dygraph.NCE.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.PRelu ('paddle.fluid.dygraph.nn.PRelu', ('document', 'da956af1676b08bf15553751a3643b55'))
+paddle.fluid.dygraph.PRelu ('paddle.fluid.dygraph.nn.PRelu', ('document', '58141577833fedf619f2f324eea57e00'))
 paddle.fluid.dygraph.PRelu.__init__ (ArgSpec(args=['self', 'name_scope', 'mode', 'param_attr'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.dygraph.PRelu.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
 paddle.fluid.dygraph.PRelu.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py
index 762b65b551e96f596219000f870c5168a2bd7318..6e8dbc3b8bf33717fe804991a90510dfd3a60411 100644
--- a/python/paddle/fluid/dygraph/base.py
+++ b/python/paddle/fluid/dygraph/base.py
@@ -150,15 +150,15 @@ def _print_debug_msg(limit=5, is_test=False):
 @framework.dygraph_only
 def to_variable(value, block=None, name=None):
     """
-    This function will create a variable from ndarray
+    The API will create a ``Variable`` object from numpy\.ndarray or Variable object.
 
-    Args:
-        value(ndarray): the numpy value need to be convert
-        block(fluid.Block|None): which block this variable will be in
-        name(str|None): Name of Variable
+    Parameters:
+        value(ndarray): The numpy\.ndarray object that needs to be converted, it can be multi-dimension, and the data type is one of numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}.
+        block(fluid.Block, optional): Which block this variable will be in. Default: None.
+        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
 
-    return:
-        Variable: The variable created from given numpy
+    Returns:
+        Variable: ``Tensor`` created from the specified numpy\.ndarray object, data type and shape is the same as ``value`` .
 
     Examples:
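
For reference, a minimal dygraph sketch of the ``to_variable`` behaviour documented above (array contents and variable names are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = np.ones([2, 2], np.float32)
        y = fluid.dygraph.to_variable(x)  # Variable with the same shape and dtype as x
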
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 7af0be58387960c264b32472ddeae3ab5a7ea68b..eb76463292ec04c44993b6afcb75c7d229aa0a50 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -1838,39 +1838,43 @@ class GRUUnit(layers.Layer):
 class NCE(layers.Layer):
     """
-    Compute and return the noise-contrastive estimation training loss. See
+    This interface is used to construct a callable object of the ``NCE`` class.
+    For more details, refer to code examples.
+    It implements the ``NCE`` loss function.
+    By default this function uses a uniform distribution for sampling, and it
+    computes and returns the noise-contrastive estimation training loss. See
     `Noise-contrastive estimation: A new estimation principle for unnormalized
     statistical models
     `_ .
-    By default this operator uses a uniform distribution for sampling.
 
     Parameters:
         name_scope(str): The name of this class.
         num_total_classes (int): Total number of classes in all samples
-        param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
+        param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
             of nce. If it is set to None or one attribute of ParamAttr, nce
             will create ParamAttr as param_attr. If the Initializer of the param_attr
             is not set, the parameter is initialized with Xavier. Default: None.
-        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of nce.
+        bias_attr (ParamAttr or bool, optional): The attribute for the bias of nce.
             If it is set to False, no bias will be added to the output units.
             If it is set to None or one attribute of ParamAttr, nce
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. Default: None.
-        num_neg_samples (int): The number of negative classes. The default value is 10.
-        sampler (str): The sampler used to sample class from negtive classes.
+        num_neg_samples (int, optional): The number of negative classes. The default value is 10.
+        sampler (str, optional): The sampler used to sample class from negative classes.
            It can be 'uniform', 'log_uniform' or 'custom_dist'.
            default: 'uniform'.
-        custom_dist (float[]|None): A float[] with size=num_total_classes.
+        custom_dist (float[], optional): A float[] with size=num_total_classes.
            It is used when sampler is set to 'custom_dist'.
            custom_dist[i] is the probability of i-th class to be sampled. Default: None.
-        seed (int): The seed used in sampler. Default: 0.
-        is_sparse(bool): The flag indicating whether to use sparse update, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
+        seed (int, optional): The seed used in sampler. Default: 0.
+        is_sparse(bool, optional): The flag indicating whether to use sparse update. If is_sparse is True, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
 
-    Attributes:
-        weight (Parameter): the learnable weights of this layer.
-        bias (Parameter|None): the learnable bias of this layer.
+    Attribute:
+        **weight** (Parameter): the learnable weights of this layer.
+        **bias** (Parameter or None): the learnable bias of this layer.
+
     Returns:
-        Variable: The output nce loss.
+        None
 
     Examples:
         .. code-block:: python
@@ -2087,6 +2091,10 @@ class PRelu(layers.Layer):
     """
+    This interface is used to construct a callable object of the ``PRelu`` class.
+    For more details, refer to code examples.
+    It implements three activation methods of the ``PRelu`` activation function.
+
     Equation:
 
     .. math::
@@ -2098,30 +2106,32 @@ class PRelu(layers.Layer):
             and element. all: all elements share same weight
             channel:elements in a channel share same weight
             element:each element has a weight
-        param_attr(ParamAttr|None): The parameter attribute for the learnable
-            weight (alpha).
-
-    Attributes:
-        weight (Parameter): the learnable weights of this layer.
+        param_attr(ParamAttr, optional): The parameter attribute for the learnable
+            weight (alpha). Default: None.
+    Attribute:
+        **weight** (Parameter): the learnable weights of this layer.
+
     Returns:
-        Variable: The output tensor with the same shape as input.
+        None
 
     Examples:
 
        .. code-block:: python
 
          import paddle.fluid as fluid
+         from paddle.fluid.dygraph.base import to_variable
         import numpy as np
 
          inp_np = np.ones([5, 200, 100, 100]).astype('float32')
          with fluid.dygraph.guard():
+             inp_np = to_variable(inp_np)
             mode = 'channel'
             prelu = fluid.PRelu(
                'prelu',
                mode=mode,
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
-            dy_rlt = prelu(fluid.dygraph.base.to_variable(inp_np))
+            dy_rlt = prelu(inp_np)
 
     """
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d8a1411ed0831391198cb816445185e28c115304..20e9334ceeb1ae0e6cf8983fed67367918dcc6a1 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -2324,11 +2324,11 @@ def sequence_softmax(input, use_cudnn=False, name=None):
 def softmax(input, use_cudnn=False, name=None, axis=-1):
     """
-    The input of the softmax operator is a tensor of any rank. The output tensor
-    has the same shape as the input.
+    This operator implements the softmax layer. The calculation process is as follows:
 
-    The dimension :attr:`axis` of the input tensor will be permuted to the last.
-    Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
+    1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.
+
+    2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
     second dimension(row length) is the same as the dimension :attr:`axis` of the input
     tensor, and the first dimension(column length) is the product of all other
     dimensions of the input tensor. For each row of the matrix, the softmax operator
@@ -2336,6 +2336,9 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
     of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
     K-dimensional vector of real values in the range [0, 1] that add up to 1.
 
+    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
+    are performed to restore the two-dimensional matrix to the same dimension as the ``input``.
+
     It computes the exponential of the given dimension and the sum of exponential
     values of all the other dimensions in the K-dimensional vector input.
     Then the ratio of the exponential of the given dimension and the sum of
@@ -2348,20 +2351,66 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
 
         Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}
 
+    Example:
+
+    .. code-block:: text
+
+        Case 1:
+          Input:
+            X.shape = [2, 3, 4]
+            X.data = [[[2.0, 3.0, 4.0, 5.0],
+                       [3.0, 4.0, 5.0, 6.0],
+                       [7.0, 8.0, 8.0, 9.0]],
+                      [[1.0, 2.0, 3.0, 4.0],
+                       [5.0, 6.0, 7.0, 8.0],
+                       [6.0, 7.0, 8.0, 9.0]]]
+
+          Attrs:
+            axis = -1
+
+          Output:
+            Out.shape = [2, 3, 4]
+            Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
+                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
+
+        Case 2:
+          Input:
+            X.shape = [2, 3, 4]
+            X.data = [[[2.0, 3.0, 4.0, 5.0],
+                       [3.0, 4.0, 5.0, 6.0],
+                       [7.0, 8.0, 8.0, 9.0]],
+                      [[1.0, 2.0, 3.0, 4.0],
+                       [5.0, 6.0, 7.0, 8.0],
+                       [6.0, 7.0, 8.0, 9.0]]]
+          Attrs:
+            axis = 1
+
+          Output:
+            Out.shape = [2, 3, 4]
+            Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
+                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
+                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
+                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
+                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
+                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
+
     Args:
-        input (Variable): The input variable. A LoDTensor or Tensor with type
-        float32, float64.
-        use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \
+        input (Variable): The input variable. A multi-dimension ``Tensor`` with type float32 or float64.
+        use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
            library is installed. To improve numerical stablity, set use_cudnn to \
-            False by default. Default: False
-        name (str|None): A name for this layer(optional). If set None, the layer
+            False by default.
+        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Default: None.
            will be named automatically. Default: None.
-        axis (int): The index of dimension to perform softmax calculations, it should
+        axis (int, optional): The index of dimension to perform softmax calculations, it should
            be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
            input variable. Default: -1. -1 means the last dimension.
 
     Returns:
-        Variable: output of softmax. A Tensor with type float32, float64.
+        Variable: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
 
     Examples:
@@ -2379,7 +2428,6 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
             output= exe.run(feed={"input": x},
                             fetch_list=[result[0]])
             print(output)
-            #array([0.22595254, 0.39276356, 0.38128382], dtype=float32)]
     """
     helper = LayerHelper('softmax', **locals())
     if not isinstance(input, Variable):
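
The axis semantics documented above can be reproduced with plain NumPy; a small sketch that recomputes Case 1 and Case 2 (the helper name is illustrative):

.. code-block:: python

    import numpy as np

    x = np.array([[[2.0, 3.0, 4.0, 5.0],
                   [3.0, 4.0, 5.0, 6.0],
                   [7.0, 8.0, 8.0, 9.0]],
                  [[1.0, 2.0, 3.0, 4.0],
                   [5.0, 6.0, 7.0, 8.0],
                   [6.0, 7.0, 8.0, 9.0]]], dtype=np.float32)

    def np_softmax(a, axis):
        # subtract the per-slice maximum for numerical stability, then normalize
        e = np.exp(a - a.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    print(np_softmax(x, axis=-1))  # matches Case 1 above
    print(np_softmax(x, axis=1))   # matches Case 2 above
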
@@ -7804,12 +7852,9 @@ def softmax_with_cross_entropy(logits,
                                return_softmax=False,
                                axis=-1):
     """
-    **Softmax With Cross Entropy Operator.**
-
-    Cross entropy loss with softmax is used as the output layer extensively. This
-    operator computes the softmax normalized values for dimension :attr:`axis` of
-    the input tensor, after which cross-entropy loss is computed. This provides
-    a more numerically stable gradient.
+    This operator implements the cross entropy loss function with softmax. This function
+    combines the calculation of the softmax operation and the cross entropy loss function
+    to provide a more numerically stable gradient.
 
     Because this operator performs a softmax on logits internally, it expects
     unscaled logits. This operator should not be used with the output of
@@ -7826,72 +7871,71 @@ def softmax_with_cross_entropy(logits,
 
     .. math::
 
-        loss_j =  -\\text{logit}_{label_j} +
-        \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{logit}_i)\\right), j = 1,..., K
+        loss_j =  -\\text{logits}_{label_j} +
+        \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{logits}_i)\\right), j = 1,..., K
 
     2) Soft label (each sample can have a distribution over all classes)
 
     .. math::
 
         loss_j = -\\sum_{i=0}^{K}\\text{label}_i
-        \\left(\\text{logit}_i - \\log\\left(\\sum_{i=0}^{K}
-        \\exp(\\text{logit}_i)\\right)\\right), j = 1,...,K
+        \\left(\\text{logits}_i - \\log\\left(\\sum_{i=0}^{K}
+        \\exp(\\text{logits}_i)\\right)\\right), j = 1,...,K
 
-    3) If :attr:`numeric_stable_mode` is :attr:`True`, softmax is calculated
-    first by:
+    3) If :attr:`numeric_stable_mode` is :attr:`True`, softmax is calculated first by:
 
     .. math::
 
-        max_j &= \\max_{i=0}^{K}{\\text{logit}_i}
+        max_j &= \\max_{i=0}^{K}{\\text{logits}_i}
 
-        log\\_max\\_sum_j &= \\log\\sum_{i=0}^{K}\\exp(logit_i - max_j)
+        log\\_max\\_sum_j &= \\log\\sum_{i=0}^{K}\\exp(logits_i - max_j)
 
-        softmax_j &= \\exp(logit_j - max_j - {log\\_max\\_sum}_j)
+        softmax_j &= \\exp(logits_j - max_j - {log\\_max\\_sum}_j)
 
     and then cross entropy loss is calculated by softmax and label.
 
     Args:
-        logits (Variable): The input tensor of unscaled log probabilities.
-        label (Variable): The ground truth tensor. If :attr:`soft_label`
-            is set to :attr:`True`, Label is a Tensor in the
-            same shape with :attr:`logits`. If :attr:`soft_label` is set to
-            :attr:`True`, Label is a Tensor in the same shape with
-            :attr:`logits` expect shape in dimension :attr:`axis` as 1.
-        soft_label (bool): A flag to indicate whether to interpretate the given
+        logits (Variable): A multi-dimension ``Tensor`` , and the data type is float32 or float64. The input tensor of unscaled log probabilities.
+        label (Variable): The ground truth ``Tensor`` , data type is the same
+            as the ``logits`` . If :attr:`soft_label` is set to :attr:`True`,
+            Label is a ``Tensor`` in the same shape with :attr:`logits`.
+            If :attr:`soft_label` is set to :attr:`False`, Label is a ``Tensor``
+            in the same shape with :attr:`logits` except shape in dimension :attr:`axis` as 1.
+        soft_label (bool, optional): A flag to indicate whether to interpret the given
            labels as soft labels. Default False.
-        ignore_index (int): Specifies a target value that is ignored and does
-            not contribute to the input gradient. Only valid
-            if :attr:`soft_label` is set to :attr:`False`.
-            Default: kIgnoreIndex
-        numeric_stable_mode (bool): A flag to indicate whether to use a more
-            numerically stable algorithm. Only valid
-            when :attr:`soft_label` is :attr:`False`
-            and GPU is used. When :attr:`soft_label`
-            is :attr:`True` or CPU is used, the
-            algorithm is always numerically stable.
-            Note that the speed may be slower when use
-            stable algorithm. Default: True
-        return_softmax (bool): A flag indicating whether to return the softmax
-            along with the cross entropy loss. Default: False
-        axis (int): The index of dimension to perform softmax calculations. It
-            should be in range :math:`[-1, rank - 1]`, while :math:`rank`
-            is the rank of input :attr:`logits`. Default: -1.
+        ignore_index (int, optional): Specifies a target value that is ignored and does
+            not contribute to the input gradient. Only valid
+            if :attr:`soft_label` is set to :attr:`False`.
+            Default: kIgnoreIndex(-100).
+        numeric_stable_mode (bool, optional): A flag to indicate whether to use a more
+            numerically stable algorithm. Only valid
+            when :attr:`soft_label` is :attr:`False`
+            and GPU is used. When :attr:`soft_label`
+            is :attr:`True` or CPU is used, the
+            algorithm is always numerically stable.
+            Note that the speed may be slower when using the
+            stable algorithm. Default: True.
+        return_softmax (bool, optional): A flag indicating whether to return the softmax
+            along with the cross entropy loss. Default: False.
+        axis (int, optional): The index of dimension to perform softmax calculations. It
+            should be in range :math:`[-1, rank - 1]`, while :math:`rank`
+            is the rank of input :attr:`logits`. Default: -1.
 
     Returns:
-        Variable or Tuple of two Variables: Return the cross entropy loss if \
-                                            `return_softmax` is False, otherwise the tuple \
-                                            (loss, softmax), softmax is in the same shape \
-                                            with input logits and cross entropy loss is in \
-                                            the same shape with input logits except shape \
-                                            in dimension :attr:`axis` as 1.
+        ``Variable`` or Tuple of two ``Variable`` : Return the cross entropy loss if \
+                                                    `return_softmax` is False, otherwise the tuple \
+                                                    (loss, softmax), softmax is in the same shape \
+                                                    with input logits and cross entropy loss is in \
+                                                    the same shape with input logits except shape \
+                                                    in dimension :attr:`axis` as 1.
 
     Examples:
         .. code-block:: python
 
            import paddle.fluid as fluid
 
-           data = fluid.layers.data(name='data', shape=[128], dtype='float32')
-           label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+           data = fluid.data(name='data', shape=[-1, 128], dtype='float32')
+           label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
            fc = fluid.layers.fc(input=data, size=100)
            out = fluid.layers.softmax_with_cross_entropy(
                logits=fc, label=label)
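
When ``return_softmax`` is True, the call returns the ``(loss, softmax)`` tuple described in the Returns section above; a minimal sketch building on the same example (layer sizes are illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.data(name='data', shape=[-1, 128], dtype='float32')
    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
    fc = fluid.layers.fc(input=data, size=100)
    loss, softmax = fluid.layers.softmax_with_cross_entropy(
        logits=fc, label=label, return_softmax=True)
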
@@ -11039,54 +11083,45 @@ def affine_grid(theta, out_shape, name=None):
 def rank_loss(label, left, right, name=None):
     """
-
-    **Rank loss layer for RankNet**
-
+    This operator implements the rank loss layer in the RankNet model. RankNet is a pairwise ranking model
+    with a training sample consisting of a pair of documents (A and B). The label (P)
+    indicates whether A is ranked higher than B or not. For more details, please refer to:
     `RankNet `_
-    is a pairwise ranking model with a training sample consisting of a pair
-    of documents, A and B. Label P indicates whether A is ranked higher than B
-    or not:
-
-    P = {0, 1} or {0, 0.5, 1}, where 0.5 means that there is no information
-    about the rank of the input pair.
 
     Rank loss layer takes three inputs: left ( :math:`o_i` ), right ( :math:`o_j` ) and
     label ( :math:`P_{i,j}` ). The inputs respectively represent RankNet's output scores
-    for documents A and B and the value of label P. The following equation
-    computes rank loss C_{i,j} from the inputs:
+    for documents A and B and the value of label P. Rank loss layer takes batch inputs
+    with size batch_size (batch_size >= 1), P = {0, 1} or {0, 0.5, 1},
+    where 0.5 means that there is no information about the rank of the input pair.
+    The following equation computes rank loss C_{i,j} from the inputs:
 
     .. math::
-
         C_{i,j} &= -\\tilde{P_{ij}} * o_{i,j} + \log(1 + e^{o_{i,j}}) \\\\
-
+    .. math::
         o_{i,j} &= o_i - o_j  \\\\
-
+    .. math::
        \\tilde{P_{i,j}} &= \\left \{0, 0.5, 1 \\right \} \ or \ \\left \{0, 1 \\right \}
-
-    Rank loss layer takes batch inputs with size batch_size (batch_size >= 1).
-
-    Args:
-        label (Variable): Indicats whether A ranked higher than B or not.
-        left (Variable): RankNet's output score for doc A.
-        right (Variable): RankNet's output score for doc B.
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+    Parameters:
+        label (Variable): 2-D ``Tensor`` with the shape of :math:`[batch,1]`, the data type is float32, batch indicates the size of the data. Indicates whether A is ranked higher than B or not.
+        left (Variable): 2-D ``Tensor`` with the shape of :math:`[batch,1]`, the data type is float32. RankNet's output score for doc A.
+        right (Variable): 2-D ``Tensor`` with the shape of :math:`[batch,1]`, the data type is float32. RankNet's output score for doc B.
+        name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
 
     Returns:
-        list: The value of rank loss.
+        Variable: ``Tensor`` indicating the output value of the rank loss layer, the data type is float32, and the return value's shape is :math:`[batch,1]` .
 
     Raises:
-        ValueError: Any of label, left, and right is not a variable.
+        ValueError: Any of label, left, and right is not a ``Variable`` .
 
     Examples:
 
        .. code-block:: python
 
          import paddle.fluid as fluid
-         label = fluid.layers.data(name="label", shape=[-1, 1], dtype="float32")
-         left = fluid.layers.data(name="left", shape=[-1, 1], dtype="float32")
-         right = fluid.layers.data(name="right", shape=[-1, 1], dtype="float32")
+         label = fluid.data(name="label", shape=[-1, 1], dtype="float32")
+         left = fluid.data(name="left", shape=[-1, 1], dtype="float32")
+         right = fluid.data(name="right", shape=[-1, 1], dtype="float32")
          out = fluid.layers.rank_loss(label, left, right)
     """
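
The rank loss equation above is easy to sanity-check outside of Paddle; a NumPy sketch with illustrative values:

.. code-block:: python

    import numpy as np

    # P, o_i, o_j for a batch of three document pairs (illustrative values)
    label = np.array([[1.0], [0.0], [0.5]], dtype=np.float32)
    left = np.array([[0.8], [0.2], [0.5]], dtype=np.float32)
    right = np.array([[0.3], [0.7], [0.5]], dtype=np.float32)

    o = left - right                         # o_{i,j} = o_i - o_j
    loss = -label * o + np.log1p(np.exp(o))  # C_{i,j}, shape [batch, 1]
    print(loss)
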