# -*- coding: utf-8 -*-
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from ...fluid.data_feeder import check_variable_and_dtype

# TODO: define loss functions of neural network
import paddle.fluid as fluid
from ...fluid.layers.nn import _elementwise_op_in_dygraph
from ...tensor.manipulation import reshape
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import _varbase_creator
from ...static import Variable
from paddle.utils import deprecated
from paddle import _C_ops, _legacy_C_ops
from paddle import in_dynamic_mode
from paddle.framework import core, _non_static_mode
from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _current_expected_place

__all__ = []


def dice_loss(input, label, epsilon=0.00001, name=None):
    r"""

    Dice loss for comparing the similarity between the input predictions and the label.
    This implementation is for binary classification, where the input is sigmoid
    predictions of each pixel, usually used for segmentation task. The dice loss can
    be defined as the following equation:

    .. math::

        dice\_loss &= 1 - \frac{2 * intersection\_area}{total\_area} \\
                  &= \frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\
                  &= \frac{(union\_area - intersection\_area)}{total\_area}


    Parameters:
        input (Tensor): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_k, D]`, where :math:`N_1` is
                          the batch_size, :math:`D` is the number of categories. It is usually the output
                          predictions of sigmoid activation. The data type can be float32 or float64.
        label (Tensor): Tensor, the ground truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_k, 1]`,
                          where :math:`N_1` is the batch_size. The data type can be int32 or int64.
        epsilon (float): The epsilon will be added to the numerator and denominator.
                         If both input and label are empty, it makes sure dice is 1.
                         Default: 0.00001
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tensor, whose shape is [1] and whose data type is the same as `input`.

    Example:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.randn((3,224,224,2))
            label = paddle.randint(high=2, shape=(3,224,224,1))
            predictions = F.softmax(x)
            loss = F.dice_loss(input=predictions, label=label)
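
            # Added illustration (the values below are chosen here for
            # demonstration and are not part of the original example): a
            # perfect one-hot prediction drives the dice loss towards 0
            # (up to the epsilon term).
            pred = paddle.to_tensor([[0.0, 1.0], [1.0, 0.0]])
            lab = paddle.to_tensor([[1], [0]], dtype='int64')
            print(F.dice_loss(input=pred, label=lab))  # ~0.0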
    """
    assert input.dtype in (paddle.float32, paddle.float64)
    assert label.dtype in (paddle.int32, paddle.int64)
    assert len(input.shape) >= 2, \
        "The rank of input should be greater than or equal to 2."
    assert len(input.shape) == len(
        label.shape), ("The rank of input and label should be equal, "
                       "but received input: %d, label: %d." %
                       (len(input.shape), len(label.shape)))
    assert label.shape[-1] == 1, ("The last dimension of label should be 1, "
                                  "but received %d." % label.shape[-1])
    assert input.shape[:-1] == label.shape[:-1], (
        "All dimensions should be equal except the last one.")
    assert input.numel() > 0 and label.numel() > 0, \
        "Any dimension of input and label cannot be equal to 0."

    label = paddle.squeeze(label, [-1])
    label = paddle.nn.functional.one_hot(label, input.shape[-1])
    reduce_dim = list(range(1, len(input.shape)))
    inse = paddle.sum(input * label, axis=reduce_dim)
    dice_denominator = paddle.sum(input, axis=reduce_dim) + paddle.sum(
        label, axis=reduce_dim)
    dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
    return paddle.mean(dice_score)


def log_loss(input, label, epsilon=1e-4, name=None):
    r"""

    **Negative Log Loss Layer**

    This layer accepts input predictions and target label and returns the
    negative log loss.

    .. math::

        Out = -label * \log{(input + \epsilon)}
              - (1 - label) * \log{(1 - input + \epsilon)}

    Args:
        input (Tensor|list):  A 2-D tensor with shape [N x 1], where N is the
                                batch size. This input is a probability computed
                                by the previous operator. Data type float32.
        label (Tensor|list):  The ground truth which is a 2-D tensor with
                                shape [N x 1], where N is the batch size.
                                Data type float32.
        epsilon (float, optional): A small number for numerical stability. Default 1e-4.
        name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name` . Usually the name does not need to be set, and it is None by default.

    Returns:
        Tensor, whose shape is [N x 1] and whose data type is float32.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.nn.functional as F

          label = paddle.randint(0, 2, (10,1)).astype('float32')
          prob = paddle.rand((10,1))
          cost = F.log_loss(input=prob, label=label)
    """
    if in_dygraph_mode():
        return _C_ops.log_loss(input, label, epsilon)

    helper = LayerHelper('log_loss', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
    check_variable_and_dtype(label, 'label', ['float32'], 'log_loss')

    loss = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(type='log_loss',
                     inputs={
                         'Predicted': [input],
                         'Labels': [label]
                     },
                     outputs={'Loss': [loss]},
                     attrs={'epsilon': epsilon})
    return loss


def fluid_softmax_with_cross_entropy(logits,
                                     label,
                                     soft_label=False,
                                     ignore_index=-100,
                                     numeric_stable_mode=True,
                                     return_softmax=False,
                                     axis=-1):
    r"""

    This operator implements the cross entropy loss function with softmax. This function
    combines the calculation of the softmax operation and the cross entropy loss function
    to provide a more numerically stable gradient.

    Because this operator performs a softmax on logits internally, it expects
    unscaled logits. This operator should not be used with the output of
    softmax operator since that would produce incorrect results.

    When the attribute :attr:`soft_label` is set :attr:`False`, this operator
    expects mutually exclusive hard labels, each sample in a batch is in exactly
    one class with a probability of 1.0. Each sample in the batch will have a
    single label.

    The equation is as follows:

    1) Hard label (one-hot label, so every sample has exactly one class)

    .. math::

        \\loss_j=-\text{logits}_{label_j} +\log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right), j = 1,..., K

    2) Soft label (each sample can have a distribution over all classes)

    .. math::

        \\loss_j= -\sum_{i=0}^{K}\text{label}_i\left(\text{logits}_i - \log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right)\right), j = 1,...,K

    3) If :attr:`numeric_stable_mode` is :attr:`True`, softmax is calculated first by:

    .. math::

        \\max_j&=\max_{i=0}^{K}{\text{logits}_i} \\
                log\_max\_sum_j &= \log\sum_{i=0}^{K}\exp(logits_i - max_j)\\
                softmax_j &= \exp(logits_j - max_j - {log\_max\_sum}_j)

    and then cross entropy loss is calculated by softmax and label.

    Args:
        logits (Tensor): A multi-dimension ``Tensor`` , and the data type is float32 or float64. The input tensor of unscaled log probabilities.
        label (Tensor): The ground truth  ``Tensor`` , data type is the same
            as the ``logits`` . If :attr:`soft_label` is set to :attr:`True`,
            Label is a ``Tensor`` in the same shape as :attr:`logits`.
            If :attr:`soft_label` is set to :attr:`False`, Label is a ``Tensor``
            in the same shape as :attr:`logits` except the shape in dimension :attr:`axis` is 1.
        soft_label (bool, optional): A flag to indicate whether to interpret the given
            labels as soft labels. Default False.
        ignore_index (int, optional): Specifies a target value that is ignored and does
                                      not contribute to the input gradient. Only valid
                                      if :attr:`soft_label` is set to :attr:`False`.
                                      Default: kIgnoreIndex(-100).
        numeric_stable_mode (bool, optional): A flag to indicate whether to use a more
                                              numerically stable algorithm. Only valid
                                              when :attr:`soft_label` is :attr:`False`
                                              and GPU is used. When :attr:`soft_label`
                                              is :attr:`True` or CPU is used, the
                                              algorithm is always numerically stable.
                                              Note that the speed may be slower when
                                              using the stable algorithm. Default: True.
        return_softmax (bool, optional): A flag indicating whether to return the softmax
                                         along with the cross entropy loss. Default: False.
        axis (int, optional): The index of dimension to perform softmax calculations. It
                              should be in range :math:`[-1, rank - 1]`, while :math:`rank`
                              is the rank of input :attr:`logits`. Default: -1.

    Returns:
        ``Tensor`` or Tuple of two ``Tensor`` : Return the cross entropy loss if \
                                                    `return_softmax` is False, otherwise the tuple \
                                                    (loss, softmax), softmax is in the same shape \
                                                    with input logits and cross entropy loss is in \
                                                    the same shape with input logits except shape \
                                                    in dimension :attr:`axis` as 1.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            data = np.random.rand(128).astype("float32")
            label = np.random.randint(0, 100, (1,)).astype("int64")
            data = paddle.to_tensor(data)
            label = paddle.to_tensor(label)
            linear = paddle.nn.Linear(128, 100)
            x = linear(data)
            out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
            print(out)
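
        A soft-label sketch (added here for illustration; the random shapes and
        the use of ``F.softmax`` to build a label distribution are assumptions,
        not part of the original example):

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            logits = paddle.rand([4, 10])
            # build a normalized distribution over the 10 classes as the soft label
            soft_label = F.softmax(paddle.rand([4, 10]), axis=-1)
            loss, softmax = F.softmax_with_cross_entropy(
                logits=logits, label=soft_label,
                soft_label=True, return_softmax=True)
            print(loss.shape)  # [4, 1]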
    """
    if _non_static_mode():
        if core.is_compiled_with_npu():
            softmax, backprop, loss = _legacy_C_ops.softmax_with_cross_entropy(
                logits, label, 'soft_label', soft_label, 'ignore_index',
                ignore_index, 'numeric_stable_mode', numeric_stable_mode,
                'axis', axis)
        else:
            if in_dygraph_mode():
                softmax, loss = _C_ops.cross_entropy_with_softmax(
                    logits, label, soft_label, True, numeric_stable_mode,
                    ignore_index, axis)
            if _in_legacy_dygraph():
                softmax, loss = _legacy_C_ops.softmax_with_cross_entropy(
                    logits, label, 'soft_label', soft_label, 'ignore_index',
                    ignore_index, 'numeric_stable_mode', numeric_stable_mode,
                    'axis', axis)
        if not return_softmax:
            return loss
        else:
            return loss, softmax

    attrs = {
        'soft_label': soft_label,
        'ignore_index': ignore_index,
        'numeric_stable_mode': numeric_stable_mode,
        'axis': axis
    }
    helper = LayerHelper('softmax_with_cross_entropy', **locals())
    softmax = helper.create_variable_for_type_inference(dtype=logits.dtype)
    loss = helper.create_variable_for_type_inference(dtype=logits.dtype)

    outputs = {'Softmax': softmax, 'Loss': loss}
    if core.is_compiled_with_npu() or core.is_compiled_with_mlu():
        backprop = helper.create_variable_for_type_inference(dtype=logits.dtype)
        outputs['Backprop'] = backprop
    helper.append_op(type='softmax_with_cross_entropy',
                     inputs={
                         'Logits': logits,
                         'Label': label
                     },
                     outputs=outputs,
                     attrs=attrs)

    if return_softmax:
        return loss, softmax

    return loss


def npair_loss(anchor, positive, labels, l2_reg=0.002):
    """

    Npair loss requires paired data. Npair loss has two parts: the first part is L2
    regularizer on the embedding vector; the second part is cross entropy loss which
    takes the similarity matrix of anchor and positive as logits.

    For more information, please refer to:
    `Improved Deep Metric Learning with Multi class N pair Loss Objective <http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf>`_

    Args:
      anchor(Tensor): embedding vector for the anchor image. shape=[batch_size, embedding_dims],
                        the data type is float32 or float64.
      positive(Tensor): embedding vector for the positive image. shape=[batch_size, embedding_dims],
                        the data type is float32 or float64.
      labels(Tensor): 1-D tensor. shape=[batch_size], the data type is float32, float64 or int64.
      l2_reg(float32): L2 regularization term on embedding vector, default: 0.002.

    Returns:
      A Tensor representing the npair loss, the data type is the same as anchor, the shape is [1].

    Examples:

      .. code-block:: python

          import paddle

          DATATYPE = "float32"

          anchor = paddle.rand(shape=(18, 6), dtype=DATATYPE)
          positive = paddle.rand(shape=(18, 6), dtype=DATATYPE)
          labels = paddle.rand(shape=(18,), dtype=DATATYPE)

          npair_loss = paddle.nn.functional.npair_loss(anchor, positive, labels, l2_reg = 0.002)
          print(npair_loss)

    """
    check_variable_and_dtype(anchor, 'anchor', ['float32', 'float64'],
                             'npair_loss')
    check_variable_and_dtype(positive, 'positive', ['float32', 'float64'],
                             'npair_loss')
    check_variable_and_dtype(labels, 'labels', ['float32', 'float64', 'int64'],
                             'npair_loss')
    Beta = 0.25
    batch_size = labels.shape[0]

    labels = paddle.reshape(labels, shape=[batch_size, 1])
    labels = paddle.tile(labels, repeat_times=[1, batch_size])

    labels = paddle.equal(labels, paddle.transpose(labels,
                                                   perm=[1,
                                                         0])).astype('float32')
    labels = labels / paddle.sum(labels, axis=1, keepdim=True)

    l2loss = paddle.mean(paddle.sum(paddle.square(anchor), 1)) \
             + paddle.mean(paddle.sum(paddle.square(positive), 1))
    l2loss = l2loss * Beta * l2_reg

    similarity_matrix = paddle.matmul(anchor,
                                      positive,
                                      transpose_x=False,
                                      transpose_y=True)
    softmax_ce = fluid_softmax_with_cross_entropy(logits=similarity_matrix,
                                                  label=labels,
                                                  soft_label=True)
    cross_entropy = paddle.sum(labels * softmax_ce, 0)
    celoss = paddle.mean(cross_entropy)

    return l2loss + celoss


def square_error_cost(input, label):
    r"""

    This op accepts input predictions and target label and returns the
    squared error cost.

    Given the input predictions and the target label, the equation is:

    .. math::

        Out = (input - label)^2

    Parameters:
        input (Tensor): Input tensor, the data type should be float32.
        label (Tensor): Label tensor, the data type should be float32.

    Returns:
        Tensor, The tensor storing the element-wise squared error
        difference between input and label.

    Examples:

        .. code-block:: python

            import paddle
            input = paddle.to_tensor([1.1, 1.9])
            label = paddle.to_tensor([1.0, 2.0])
            output = paddle.nn.functional.square_error_cost(input, label)
            print(output)
            # [0.01, 0.01]

    """
    if in_dygraph_mode():
        minus_out = _C_ops.subtract(input, label)
        square_out = _C_ops.square(minus_out)
        return square_out
    elif _in_legacy_dygraph():
        minus_out = _legacy_C_ops.elementwise_sub(input, label)
        square_out = _legacy_C_ops.square(minus_out)
        return square_out

    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'square_error_cost')
    check_variable_and_dtype(label, "label", ['float32', 'float64'],
                             'square_error_cost')
    helper = LayerHelper('square_error_cost', **locals())
    minus_out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(type='elementwise_sub',
                     inputs={
                         'X': [input],
                         'Y': [label]
                     },
                     outputs={'Out': [minus_out]})

    square_out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(type='square',
                     inputs={'X': [minus_out]},
                     outputs={'Out': [square_out]})
    return square_out


def edit_distance(input,
                  label,
                  normalized=True,
                  ignored_tokens=None,
                  input_length=None,
                  label_length=None):
    """
    This op computes the edit distances, also called Levenshtein distance, between a batch of
    hypothesis strings and their references. It measures how dissimilar two strings are by counting
    the minimum number of operations to transform one string into another.
    The operations include insertion, deletion, and substitution.

    For example, given hypothesis string A = "kitten" and reference
    B = "sitting", A will be transformed into B
    at least after two substitutions and one insertion:

    "kitten" -> "sitten" -> "sittin" -> "sitting"

    So the edit distance between A and B is 3.

    The input is a Tensor; input_length and label_length should be provided.

    The `batch_size` of `label` should be the same as that of `input`.

    The output includes the edit distance between every pair of input and its related label, and the number of sequences.
    If Attr(normalized) is true,
    the edit distance will be divided by the length of the label.

    Parameters:
        input(Tensor): The input tensor, its rank should be equal to 2 and its data type should be int64.
        label(Tensor): The label tensor, its rank should be equal to 2 and its data type should be int64.
        normalized(bool, default True): Indicates whether to normalize the edit distance.
        ignored_tokens(list<int>, default None): Tokens that will be removed before
                                     calculating edit distance.
        input_length(Tensor): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
        label_length(Tensor): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
        NOTE: To avoid unexpected results, the value of every element in input_length and label_length should be equal to the value of the second dimension of input and label respectively. For example, for input [[1,2,3,4],[5,6,7,8],[9,10,11,12]], the shape of input is [3,4] and input_length should be [4,4,4].
        NOTE: This API is different from fluid.metrics.EditDistance.

    Returns:
        Tuple:
            distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
            sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,).

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input = paddle.to_tensor([[1,2,3],[4,5,6],[4,4,4],[1,1,1]], dtype='int64')
            label = paddle.to_tensor([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]], dtype='int64')
            input_len = paddle.to_tensor([3,3,3,3], dtype='int64')
            label_len = paddle.to_tensor([4,4,4,4], dtype='int64')

            distance, sequence_num = F.loss.edit_distance(input=input, label=label, input_length=input_len, label_length=label_len, normalized=False)

            # print(distance)
            # [[3.]
            #  [2.]
            #  [4.]
            #  [1.]]
            # if set normalized to True
            # [[0.75]
            #  [0.5 ]
            #  [1.  ]
            #  [0.25]]
            #
            # print(sequence_num)
            # [4]

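        A sketch of the normalized variant (added for illustration; it reuses the
        tensors from the example above and only flips ``normalized``):

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input = paddle.to_tensor([[1,2,3],[4,5,6],[4,4,4],[1,1,1]], dtype='int64')
            label = paddle.to_tensor([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]], dtype='int64')
            input_len = paddle.to_tensor([3,3,3,3], dtype='int64')
            label_len = paddle.to_tensor([4,4,4,4], dtype='int64')

            distance, sequence_num = F.loss.edit_distance(
                input=input, label=label, input_length=input_len,
                label_length=label_len, normalized=True)
            # each distance is divided by the label length, e.g. 3/4 = 0.75 for the first pair
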
    """
    check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
    check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
    helper = LayerHelper("edit_distance", **locals())

    # remove some tokens from input and labels
    if ignored_tokens is not None and len(ignored_tokens) > 0:
        erased_input = helper.create_variable_for_type_inference(dtype="int64")
        erased_label = helper.create_variable_for_type_inference(dtype="int64")

        helper.append_op(type="sequence_erase",
                         inputs={"X": [input]},
                         outputs={"Out": [erased_input]},
                         attrs={"tokens": ignored_tokens})
        input = erased_input

        helper.append_op(type="sequence_erase",
                         inputs={"X": [label]},
                         outputs={"Out": [erased_label]},
                         attrs={"tokens": ignored_tokens})
        label = erased_label

    if in_dygraph_mode():
        return _C_ops.edit_distance(input, label, input_length, label_length,
                                    normalized)

    this_inputs = {"Hyps": [input], "Refs": [label]}
    if input_length is not None and label_length is not None:
        this_inputs['HypsLength'] = [input_length]
        this_inputs['RefsLength'] = [label_length]

    # edit distance op
    edit_distance_out = helper.create_variable_for_type_inference(dtype="int64")
    sequence_num = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(type="edit_distance",
                     inputs=this_inputs,
                     outputs={
                         "Out": [edit_distance_out],
                         "SequenceNum": [sequence_num]
                     },
                     attrs={"normalized": normalized})

    return edit_distance_out, sequence_num


def binary_cross_entropy(input,
                         label,
                         weight=None,
                         reduction='mean',
                         name=None):
    """
    This op measures the binary_cross_entropy loss between input predictions ``input``
    and target labels ``label`` . The binary_cross_entropy loss can be described as:

    If :attr:`weight` is set, the loss is:

    .. math::
        Out = -1 * weight * (label * log(input) + (1 - label) * log(1 - input))

    If :attr:`weight` is None, the loss is:

    .. math::
        Out = -1 * (label * log(input) + (1 - label) * log(1 - input))

    If :attr:`reduction` set to ``'none'``, the interface will return the original loss `Out`.

    If :attr:`reduction` set to ``'mean'``, the reduced mean loss is:

    .. math::
        Out = MEAN(Out)

    If :attr:`reduction` set to ``'sum'``, the reduced sum loss is:

    .. math::
        Out = SUM(Out)

    Note that the input predictions ``input`` should always be the output of sigmoid, and the target labels ``label``
    should be numbers between 0 and 1.

    Parameters:
        input (Tensor): The input predictions tensor. 2-D tensor with shape: [N, *],
            N is batch_size, `*` means number of additional dimensions. The ``input``
            should always be the output of sigmoid. Available dtype is float32, float64.
        label (Tensor): The target labels tensor. 2-D tensor with the same shape as
            ``input``. The target labels which values should be numbers between 0 and 1.
            Available dtype is float32, float64.
        weight (Tensor, optional): A manual rescaling weight given to the loss of each
            batch element. If given, has to be a Tensor of size nbatch and the data type
            is float32, float64. Default is ``'None'``.
        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default is ``'mean'``.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.


    Returns:
        output (Tensor): If ``reduction`` is ``'none'``, the shape of output is
            same as ``input`` , else the shape of output is scalar.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
            label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
            output = paddle.nn.functional.binary_cross_entropy(input, label)
            print(output)  # [0.65537095]

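        A sketch with a per-element ``weight`` and ``reduction='none'`` (the
        weight values below are assumed for illustration only):

        .. code-block:: python

            import paddle

            input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
            label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
            weight = paddle.to_tensor([2.0, 1.0, 1.0], 'float32')
            output = paddle.nn.functional.binary_cross_entropy(
                input, label, weight=weight, reduction='none')
            print(output.shape)  # [3]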
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in binary_cross_entropy should be 'sum', "
            "'mean' or 'none', but received %s, which is not allowed." %
            reduction)

    if in_dygraph_mode():
        out = _C_ops.bce_loss(input, label)
        if weight is not None:
            out = _C_ops.multiply(out, weight)

        if reduction == 'sum':
            return _C_ops.sum(out, [], None, False)

        elif reduction == 'mean':
            return _C_ops.mean_all(out)
        else:
            return out
    else:
        if _in_legacy_dygraph():
            out = _legacy_C_ops.bce_loss(input, label)
            if weight is not None:
                out = _legacy_C_ops.elementwise_mul(out, weight, 'axis', -1)
            if reduction == 'sum':
                return _legacy_C_ops.reduce_sum(out, 'dim', [0], 'keep_dim',
                                                False, "reduce_all", True)
            elif reduction == 'mean':
                return _legacy_C_ops.mean(out)
            else:
                return out
        else:
            check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                     'binary_cross_entropy')
            check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                                     'binary_cross_entropy')

            sub_name = name if weight is None and reduction == 'none' else None
            helper = LayerHelper("binary_cross_entropy", name=sub_name)
            out = helper.create_variable_for_type_inference(dtype=input.dtype)
            helper.append_op(type='bce_loss',
                             inputs={
                                 'X': [input],
                                 'Label': [label],
                             },
                             outputs={'Out': [out]})

            if weight is not None:
                if isinstance(weight, paddle.static.Variable):
                    weight_name = name if reduction == 'none' else None
                    out = paddle.multiply(out, weight, name=weight_name)
                else:
                    raise ValueError(
                        "The weight is not a Tensor, please convert to Tensor.")

            if reduction == 'sum':
                return paddle.sum(out, name=name)
            elif reduction == 'mean':
                return paddle.mean(out, name=name)
            else:
                return out


def binary_cross_entropy_with_logits(logit,
                                     label,
                                     weight=None,
                                     reduction='mean',
                                     pos_weight=None,
                                     name=None):
    r"""
    This operator combines the sigmoid layer and the :ref:`api_nn_loss_BCELoss` layer.
    Also, we can see it as the combination of the ``sigmoid_cross_entropy_with_logits``
    layer and some reduce operations.

    This measures the element-wise probability error in classification tasks
    in which each class is independent.
    This can be thought of as predicting labels for a data-point, where labels
    are not mutually exclusive. For example, a news article can be about
    politics, technology or sports at the same time or none of these.

    First, this operator calculates the loss as follows:

    .. math::

           Out = -Labels * \log(\sigma(Logit)) - (1 - Labels) * \log(1 - \sigma(Logit))

    We know that :math:`\sigma(Logit) = \frac{1}{1 + e^{-Logit}}`. By substituting this we get:

    .. math::

           Out = Logit - Logit * Labels + \log(1 + e^{-Logit})

    For stability and to prevent overflow of :math:`e^{-Logit}` when Logit < 0,
    we reformulate the loss as follows:

    .. math::

           Out = \max(Logit, 0) - Logit * Labels + \log(1 + e^{-\|Logit\|})

    Then, if ``weight`` or ``pos_weight`` is not None, this operator multiplies the
    weight tensor with the loss `Out`. The ``weight`` tensor attaches a different
    weight to every item in the batch. The ``pos_weight`` attaches a different
    weight to the positive label of each class.

    Finally, this operator applies reduce operation on the loss.
    If :attr:`reduction` set to ``'none'``, the operator will return the original loss `Out`.
    If :attr:`reduction` set to ``'mean'``, the reduced mean loss is :math:`Out = MEAN(Out)`.
    If :attr:`reduction` set to ``'sum'``, the reduced sum loss is :math:`Out = SUM(Out)`.

    Note that the target labels ``label`` should be numbers between 0 and 1.

    Args:
        logit (Tensor): The input predictions tensor. 2-D tensor with shape: [N, *],
            N is batch_size, `*` means number of additional dimensions. The ``logit``
            is usually the output of Linear layer. Available dtype is float32, float64.
        label (Tensor): The target labels tensor. 2-D tensor with the same shape as
            ``logit``. The target labels which values should be numbers between 0 and 1.
            Available dtype is float32, float64.
        weight (Tensor, optional): A manual rescaling weight given to the loss of each
            batch element. If given, it has to be a 1D Tensor whose size is `[N, ]`,
            The data type is float32, float64. Default is ``'None'``.
        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default is ``'mean'``.
        pos_weight (Tensor, optional): A weight of positive examples. Must be a vector
            with length equal to the number of classes. The data type is float32, float64.
            Default is ``'None'``.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        output (Tensor): If ``reduction`` is ``'none'``, the shape of output is
            same as ``logit`` , else the shape of output is scalar.

    Examples:

        .. code-block:: python

            import paddle

            logit = paddle.to_tensor([5.0, 1.0, 3.0])
            label = paddle.to_tensor([1.0, 0.0, 1.0])
            output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
            print(output)  # [0.45618808]

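        A sketch showing ``pos_weight`` (the weights below are assumed for
        illustration; they up-weight the positive term of each of the 3 outputs):

        .. code-block:: python

            import paddle

            logit = paddle.to_tensor([5.0, 1.0, 3.0])
            label = paddle.to_tensor([1.0, 0.0, 1.0])
            pos_weight = paddle.to_tensor([2.0, 1.0, 1.0])
            output = paddle.nn.functional.binary_cross_entropy_with_logits(
                logit, label, pos_weight=pos_weight)
            print(output)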
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in binary_cross_entropy_with_logits "
            "should be 'sum', 'mean' or 'none', but received %s, which is not allowed."
            % reduction)

    if in_dygraph_mode():
        one = _C_ops.full([1], float(1.0), core.VarDesc.VarType.FP32,
                          _current_expected_place())
        out = _C_ops.sigmoid_cross_entropy_with_logits(logit, label, False,
                                                       -100)
        if pos_weight is not None:
            log_weight = _C_ops.add(
                _C_ops.multiply(label, _C_ops.subtract(pos_weight, one)), one)
            out = _C_ops.multiply(out, log_weight)
        if weight is not None:
            out = _C_ops.multiply(out, weight)

        if reduction == "sum":
            return _C_ops.sum(out, [], None, False)
        elif reduction == "mean":
            return _C_ops.mean_all(out)
        else:
            return out
    elif _in_legacy_dygraph():
        one = _varbase_creator(dtype=logit.dtype)
        _legacy_C_ops.fill_constant(one, 'value', float(1.0), 'force_cpu',
                                    False, 'dtype', one.dtype, 'str_value',
                                    '1.0', 'shape', [1])
        out = _legacy_C_ops.sigmoid_cross_entropy_with_logits(logit, label)
        if pos_weight is not None:
            log_weight = _legacy_C_ops.elementwise_add(
                _legacy_C_ops.elementwise_mul(
                    label, _legacy_C_ops.elementwise_sub(pos_weight, one)), one)
            out = _legacy_C_ops.elementwise_mul(out, log_weight)
        if weight is not None:
            out = _legacy_C_ops.elementwise_mul(out, weight)

        if reduction == "sum":
            return _legacy_C_ops.reduce_sum(out, 'reduce_all', True)
        elif reduction == "mean":
            return _legacy_C_ops.mean(out)
        else:
            return out

    check_variable_and_dtype(logit, 'logit', ['float32', 'float64'],
                             'binary_cross_entropy_with_logits')
    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'binary_cross_entropy_with_logits')
    sigmoid_name = None
    if reduction == 'none' and pos_weight is None and weight is None:
        sigmoid_name = name

    out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(
        logit, label, name=sigmoid_name)

    one = paddle.full(shape=[1], fill_value=1.0, dtype=logit.dtype)
    if pos_weight is not None:
        check_variable_and_dtype(pos_weight, 'pos_weight',
                                 ['float32', 'float64'],
                                 'binary_cross_entropy_with_logits')
        log_weight = paddle.add(
            paddle.multiply(label, paddle.subtract(pos_weight, one)), one)
        pos_weight_name = name if reduction == 'none' and weight is None else None
        out = paddle.multiply(out, log_weight, name=pos_weight_name)

    if weight is not None:
        check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                                 'binary_cross_entropy_with_logits')
        weight_name = name if reduction == 'none' else None
        out = paddle.multiply(out, weight, name=weight_name)

    if reduction == "sum":
        return paddle.sum(out, name=name)
    elif reduction == "mean":
        return paddle.mean(out, name=name)
    return out


def hsigmoid_loss(input,
                  label,
                  num_classes,
                  weight,
                  bias=None,
                  path_table=None,
                  path_code=None,
                  is_sparse=False,
                  name=None):
    """
    The hierarchical sigmoid organizes the classes into a complete binary tree to reduce the computational complexity
    and speed up the model training, especially the training of language model.

    Each leaf node of the complete binary tree represents a class(word) and each non-leaf node acts as a binary classifier.
    For each class(word), there's a unique path from the root to the class itself; hsigmoid calculates the cost for each non-leaf node on
    the path, and sums them up to get a total cost.

    Compared with softmax, hsigmoid can reduce the computational complexity from :math:`O(N)` to :math:`O(logN)`, where :math:`N`
    represents the number of classes or the size of the word dict.

    The API supports default tree and custom tree. For the default tree, you can refer to `Hierarchical Probabilistic Neural
    Network Language Model <http://www.iro.umontreal.ca/~lisa/pointeurs/hierarchical-nnlm-aistats05.pdf>`_.

    For the custom tree, you need to pass :attr:`path_table` and :attr:`path_code`, and do the following steps (take the language model as an example):

    1. Using a custom word dict to build a binary tree, each leaf node should be an word in the word dict.
    2. Creating a dict map word_id -> path that from the word to the root node, we call it path_table.
    3. Creating a dict map word_id -> code of path that from the word to the root node, we call it path_code.
       Code means the label of each binary classifier, 1 indicate true, 0 indicate false.
    4. Now, each word should have its path and code along the path; you can pass a batch of paths and codes related
       to the same batch of inputs.

    Parameters:
        input (Tensor): A tensor with the shape [N, D], where N is the size of mini-batch,
            and D is the feature size. Its data type supports float32 or float64.
        label (Tensor): A tensor contains the labels of training data. Its shape is [N, 1]
            and data type is int64.
        num_classes (int): The number of classes or the size of the word dict, must be greater than 2.
            If the default tree is used (`path_code` and `path_table` are None), `num_classes`
            should not be None. If the custom tree is used (`path_code` and `path_table` are not None),
            `num_classes` should be the number of non-leaf nodes, which indicates the number of
            classes used by the binary classifiers.
        weight (Tensor): A tensor with shape (num_classes - 1, D), with the same data type as `input`.
        bias (Tensor, optional): A tensor with shape (num_classes - 1, 1), with the same data type as `input`.
            If `bias` is None, no bias will be added. Default is None.
        path_table (Tensor, optional): A tensor that stores each batch of samples' path from leaf to root
            node, its shape is [N, L] and data type is int64, where L is the length of path. For each sample i,
            path_table[i] is a np.array like structure and each element in this array is the indexes in parent
            nodes' weight matrix. If `path_table` and `path_code` are None, the default tree will be used.
            Default is None.
        path_code (Tensor, optional): A tensor that stores each batch of samples' code of path from leaf
            to root node, its shape is [N, L] and data type is int64, which is the same as :attr:`path_table`.
            Each code of a path consists of the codes of the nodes from the leaf to the root node. If `path_table` and
            `path_code` are None, the default tree will be used. Default is None.
        is_sparse (bool, optional): Whether to use sparse updating instead of dense updating. If `is_sparse` is True,
            the gradient of `weight` and `input` will be sparse. Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A tensor with the cost of hierarchical sigmoid, its shape is [N, 1] and data type is the same as `input`.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            paddle.set_device('cpu')

            input = paddle.uniform([4, 3])
            # [[0.45424712  -0.77296764  0.82943869] # random
            #  [0.85062802  0.63303483  0.35312140] # random
            #  [0.57170701  0.16627562  0.21588242] # random
            #  [0.27610803  -0.99303514  -0.17114788]] # random
            label = paddle.to_tensor([0, 1, 4, 5])
            num_classes = 5
            weight=paddle.uniform([num_classes-1, 3])
            # [[-0.64477652  0.24821866  -0.17456549] # random
            #  [-0.04635394  0.07473493  -0.25081766] # random
            #  [ 0.05986035  -0.12185556  0.45153677] # random
            #  [-0.66236806  0.91271877  -0.88088769]] # random

            out=F.hsigmoid_loss(input, label, num_classes, weight)
            # [[1.96709502]
            #  [2.40019274]
            #  [2.11009121]
            #  [1.92374969]]
    """
    if in_dygraph_mode():
        out, _, _ = _C_ops.hierarchical_sigmoid(input, weight, label,
                                                path_table, path_code, bias,
                                                num_classes, is_sparse, 0, [],
                                                [], [], is_sparse)
        return out
    elif _in_legacy_dygraph():
        out, _, _ = _legacy_C_ops.hierarchical_sigmoid(
            input, weight, label, path_table, path_code, bias, 'num_classes',
            num_classes, 'is_sparse', is_sparse, 'remote_prefetch', is_sparse)
        return out

    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'hsigmoid_loss')
    check_variable_and_dtype(label, 'label', ['int64'], 'hsigmoid_loss')
    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                             'hsigmoid_loss')
    if bias is not None:
        check_variable_and_dtype(bias, 'bias', ['float32', 'float64'],
                                 'hsigmoid_loss')
    if path_table is not None:
        check_variable_and_dtype(path_table, 'path_table', ['int64'],
                                 'hsigmoid_loss')
    if path_code is not None:
        check_variable_and_dtype(path_code, 'path_code', ['int64'],
                                 'hsigmoid_loss')

    attrs = {
        "num_classes": num_classes,
        "is_sparse": is_sparse,
        "remote_prefetch": is_sparse
    }

    inputs = {
        "X": input,
        "W": weight,
        "Bias": bias,
        "PathTable": path_table,
        "PathCode": path_code,
        "Label": label
    }

    helper = LayerHelper('hsigmoid_loss', **locals())
    out = helper.create_variable_for_type_inference(input.dtype)
    pre_out = helper.create_variable_for_type_inference(input.dtype)
    outputs = {"Out": out, "PreOut": pre_out, "W_Out": weight}

    helper.append_op(type="hierarchical_sigmoid",
                     inputs=inputs,
                     outputs=outputs,
                     attrs=attrs)
    return out


def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
    r"""
    Calculate smooth_l1_loss. Creates a criterion that uses a squared
    term if the absolute element-wise error falls below delta and a delta-scaled L1 term otherwise.
    In some cases it can prevent exploding gradients and it is more robust and less
    sensitive to outliers. Also known as the Huber loss:

    .. math::

        loss(x,y) = \frac{1}{n}\sum_{i}z_i

    where :math:`z_i` is given by:

    .. math::

        \mathop{z_i} = \left\{\begin{array}{rcl}
                0.5(x_i - y_i)^2 & & {if |x_i - y_i| < \delta} \\
                \delta * |x_i - y_i| - 0.5 * \delta^2 & & {otherwise}
            \end{array} \right.

    Parameters:
        input (Tensor): Input tensor, the data type is float32 or float64. Shape is
            (N, C), where C is number of classes, and if shape is more than 2D, this
            is (N, C, D1, D2,..., Dk), k >= 1.
        label (Tensor): Label tensor, the data type is float32 or float64. The shape of label
            is the same as the shape of input.
        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
            Default is ``'mean'``.
        delta (float, optional): Specifies the hyperparameter :math:`\delta` to be used.
            The value determines how large the errors need to be to use L1. Errors
            smaller than delta are minimized with L2. Parameter is ignored for
            negative/zero values. Default = 1.0
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor, The tensor variable storing the smooth_l1_loss of input and label.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand([3, 3]).astype('float32')
            label = paddle.rand([3, 3]).astype('float32')
            output = paddle.nn.functional.smooth_l1_loss(input, label)
            print(output)
            # [0.068004]
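
        A sketch with a non-default ``delta`` and ``reduction='none'`` (the delta
        value below is chosen only for illustration):

        .. code-block:: python

            import paddle

            input = paddle.rand([3, 3]).astype('float32')
            label = paddle.rand([3, 3]).astype('float32')
            output = paddle.nn.functional.smooth_l1_loss(
                input, label, reduction='none', delta=0.5)
            print(output.shape)  # [3, 3]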
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'smooth_l1_loss')
    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'smooth_l1_loss')

    if in_dygraph_mode():
        out, residual = _C_ops.huber_loss(input, label, delta)
    else:
        helper = LayerHelper('huber_loss', **locals())
        residual = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
        helper.append_op(type='huber_loss',
                         inputs={
                             'X': input,
                             'Y': label
                         },
                         outputs={
                             'Out': out,
                             'Residual': residual
                         },
                         attrs={'delta': delta})

    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in smooth_l1_loss should be 'sum', 'mean' or"
            " 'none', but received %s, which is not allowed." % reduction)
    if reduction == 'none':
        return out
    elif reduction == 'mean':
        return paddle.mean(out)
    elif reduction == 'sum':
        return paddle.sum(out)


def margin_ranking_loss(input,
                        other,
                        label,
                        margin=0.0,
                        reduction='mean',
                        name=None):
    r"""

    Calculate the margin rank loss between the input, other and label, using the math function as follows.

    .. math::

        margin\_rank\_loss = max(0, -label * (input - other) + margin)

    If :attr:`reduction` set to ``'mean'``, the reduced mean loss is:

    .. math::
        Out = MEAN(margin\_rank\_loss)

    If :attr:`reduction` set to ``'sum'``, the reduced sum loss is:

    .. math::
        Out = SUM(margin\_rank\_loss)

    If :attr:`reduction` set to ``'none'``, just return the origin ``margin_rank_loss``.

    Parameters:
        input(Tensor): the first input tensor, its data type should be float32, float64.
        other(Tensor): the second input tensor, its data type should be float32, float64.
        label(Tensor): the label value corresponding to input, its data type should be float32, float64.
        margin (float, optional): The margin value to add, default value is 0;
        reduction (str, optional): Indicate the reduction to apply to the loss, the candidates are ``'none'``, ``'mean'``, ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned. If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. Default is ``'mean'``.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, if :attr:`reduction` is ``'mean'`` or ``'sum'``, the out shape is :math:`[1]`, otherwise the shape is the same as `input`. The same dtype as the input tensor.

    Examples:

        .. code-block:: python

            import paddle

            input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
            other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
            label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
            loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
            print(loss) # [0.75]
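
        A sketch with a non-zero ``margin`` and ``reduction='none'`` (the margin
        value below is chosen only for illustration):

        .. code-block:: python

            import paddle

            input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
            other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
            label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
            loss = paddle.nn.functional.margin_ranking_loss(
                input, other, label, margin=0.5, reduction='none')
            print(loss.shape)  # [2, 2]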
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but "
            "received %s, which is not allowed." % reduction)
    if in_dygraph_mode():
        out = _C_ops.subtract(other, input)
        out = _C_ops.multiply(out, label)
        if margin != 0.0:
            margin = fluid.dygraph.base.to_variable([margin], dtype=out.dtype)
            out = _C_ops.add(out, margin)
        out = _C_ops.relu(out)
        if reduction == 'sum':
            return _C_ops.sum(out, [], None, False)
        elif reduction == 'mean':
            return _C_ops.mean_all(out)
        return out
    elif _in_legacy_dygraph():
        out = _legacy_C_ops.elementwise_sub(other, input)
        out = _legacy_C_ops.elementwise_mul(out, label)
        if margin != 0.0:
            margin = fluid.dygraph.base.to_variable([margin], dtype=out.dtype)
            out = _legacy_C_ops.elementwise_add(out, margin)
        out = _legacy_C_ops.relu(out)
        if reduction == 'sum':
            return _legacy_C_ops.reduce_sum(out, 'reduce_all', True)
        elif reduction == 'mean':
            return _legacy_C_ops.mean(out)
        return out

    helper = LayerHelper("margin_ranking_loss", **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'margin_rank_loss')
    check_variable_and_dtype(other, 'other', ['float32', 'float64'],
                             'margin_rank_loss')
    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'margin_rank_loss')

    out = paddle.subtract(input, other)
    neg_label = paddle.neg(label)
    out = paddle.multiply(neg_label, out)

    if margin != 0.0:
        margin_var = paddle.full(shape=[1], fill_value=margin, dtype=out.dtype)
        out = paddle.add(out, margin_var)

    result_out = helper.create_variable_for_type_inference(input.dtype)

    if reduction == 'none':
1172 1173 1174
        helper.append_op(type="relu",
                         inputs={"X": out},
                         outputs={"Out": result_out})
1175 1176 1177 1178
        return result_out
    elif reduction == 'sum':
        out = paddle.nn.functional.relu(out)
        attrs = {"dim": [0], "keep_dim": False, "reduce_all": True}
1179 1180 1181 1182
        helper.append_op(type="reduce_sum",
                         inputs={"X": out},
                         outputs={"Out": result_out},
                         attrs=attrs)
1183 1184 1185
        return result_out
    elif reduction == 'mean':
        out = paddle.nn.functional.relu(out)
1186 1187 1188 1189
        helper.append_op(type="mean",
                         inputs={"X": out},
                         outputs={"Out": result_out},
                         attrs={})
1190 1191 1192
        return result_out


def l1_loss(input, label, reduction='mean', name=None):
    r"""
    Computes the L1 Loss of Tensor ``input`` and ``label`` as follows.

    If `reduction` set to ``'none'``, the loss is:

    .. math::
        Out = \lvert input - label \rvert

    If `reduction` set to ``'mean'``, the loss is:

    .. math::
        Out = MEAN(\lvert input - label \rvert)

    If `reduction` set to ``'sum'``, the loss is:

    .. math::
        Out = SUM(\lvert input - label \rvert)


    Parameters:
        input (Tensor): The input tensor. The shape is [N, `*`], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32, int64.
        label (Tensor): label. The shape is [N, `*`], same shape as ``input`` . Its data type should be float32, float64, int32, int64.
        reduction (str, optional): Indicate the reduction to apply to the loss,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If `reduction` is ``'none'``, the unreduced loss is returned;
            If `reduction` is ``'mean'``, the reduced mean loss is returned.
            If `reduction` is ``'sum'``, the reduced sum loss is returned.
            Default is ``'mean'``.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the L1 Loss of Tensor ``input`` and ``label``.
        If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input`` .
        If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
            label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])

            l1_loss = paddle.nn.functional.l1_loss(input, label)
            print(l1_loss.numpy())
            # [0.35]

            l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none')
            print(l1_loss.numpy())
            # [[0.20000005 0.19999999]
            # [0.2        0.79999995]]

            l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum')
            print(l1_loss.numpy())
            # [1.4]
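
            # A quick cross-check (illustrative): the default 'mean' reduction
            # is just the mean of the element-wise absolute differences.
            manual = paddle.mean(paddle.abs(input - label))
            # manual should match paddle.nn.functional.l1_loss(input, label)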
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in L1Loss should be 'sum', 'mean' or 'none', but "
            "received %s, which is not allowed." % reduction)

    if in_dygraph_mode():
        unreduced = _C_ops.abs(_C_ops.subtract(input, label))

        if reduction == 'mean':
            return _C_ops.mean_all(unreduced)
        elif reduction == 'sum':
            return _C_ops.sum(unreduced, [], None, False)
        else:
            return unreduced
    elif _in_legacy_dygraph():
        unreduced = _elementwise_op_in_dygraph(input,
                                               label,
                                               axis=-1,
                                               act='abs',
                                               op_name='elementwise_sub')
        if reduction == 'mean':
            return _legacy_C_ops.mean(unreduced)
        elif reduction == 'sum':
            return _legacy_C_ops.reduce_sum(unreduced, 'dim', [0], 'keep_dim',
                                            False, 'reduce_all', True)
        else:
            return unreduced

    check_variable_and_dtype(input, 'input',
                             ['float32', 'float64', 'int32', 'int64'],
                             'l1_loss')
    check_variable_and_dtype(label, 'label',
                             ['float32', 'float64', 'int32', 'int64'],
                             'l1_loss')

    if reduction == 'sum':
        unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
        return paddle.sum(unreduced, name=name)
    elif reduction == 'mean':
        unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
        return paddle.mean(unreduced, name=name)
    else:
        return paddle.fluid.layers.elementwise_sub(input,
                                                   label,
                                                   act='abs',
                                                   name=name)


def nll_loss(input,
             label,
             weight=None,
             ignore_index=-100,
             reduction='mean',
             name=None):
    """
    This API returns the negative log likelihood loss.
    See more detail in :ref:`api_nn_loss_NLLLoss` .

    Parameters:
         input (Tensor): Input tensor, the shape is :math:`[N, C]`, `C` is the number of classes.
             But in a K-dimensional case, the shape is :math:`[N, C, d_1, d_2, ..., d_K]`.
             The data type is float32, float64.
         label (Tensor): Label tensor, the shape is :math:`[N,]` or :math:`[N, d_1, d_2, ..., d_K]`.
             The data type is int64.
         weight (Tensor, optional): Weight tensor, a manual rescaling weight given
             to each class. If given, it has to be a 1D Tensor whose size is `[C, ]`. Otherwise,
             it is treated as if having all ones. The data type is
             float32, float64, Default is ``'None'``.
         ignore_index (int, optional): Specifies a target value that is ignored
             and does not contribute to the input gradient. Default is -100.
         reduction (str, optional): Indicate how to average the loss,
             the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
             If `reduction` is ``'mean'``, the reduced mean loss is returned;
             if `reduction` is ``'sum'``, the reduced sum loss is returned;
             if `reduction` is ``'none'``, no reduction will be applied.
             Default is ``'mean'``.
         name (str, optional): Name for the operation (optional, default is None).
             For more information, please refer to :ref:`api_guide_Name`.

    Returns:
         `Tensor`, the value of negative log likelihood loss.

    Examples:
        .. code-block:: python

                import paddle
                from paddle.nn.functional import nll_loss
                log_softmax = paddle.nn.LogSoftmax(axis=1)

                input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
                          [0.53331435, 0.07999352, 0.8549948 ],
                          [0.25879037, 0.39530203, 0.698465  ],
                          [0.73427284, 0.63575995, 0.18827209],
                          [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
                log_out = log_softmax(input)
                label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
                result = nll_loss(log_out, label)
                print(result) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True, [1.07202101])
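
                # A rough cross-check (illustrative): with the default 'mean'
                # reduction and no weight, nll_loss is the mean of the negative
                # log-probabilities picked at the label positions.
                picked = paddle.take_along_axis(log_out, label.unsqueeze(1), axis=1)
                manual = -picked.mean()
                # manual should match the result printed above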
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in nll_loss should be 'sum', 'mean' or "
            "'none', but received %s, which is not allowed." % reduction)

    input_shape = list(input.shape)
    input_dims = len(input_shape)
    if input_dims < 2:
        raise ValueError(
            'Expected 2 or more dimensions (got {})'.format(input_dims))

    n = input_shape[0]
    c = input_shape[1]
    if in_dygraph_mode():
        if input_dims != 2 and input_dims != 4:
            input = _C_ops.reshape(input, [n, c, 1, -1])
            label = _C_ops.reshape(label, [n, 1, -1])
            out_shape = [n] + input_shape[2:]
        out, total_weight = _C_ops.nll_loss(input, label, weight, ignore_index,
                                            reduction)
        if input_dims != 2 and input_dims != 4 and reduction == 'none':
            out = _C_ops.reshape(out, out_shape)
        return out
    elif _in_legacy_dygraph():
        if input_dims != 2 and input_dims != 4:
            input, _ = _legacy_C_ops.reshape2(input, None, 'shape',
                                              [n, c, 1, -1])
            label, _ = _legacy_C_ops.reshape2(label, None, 'shape', [n, 1, -1])
            out_shape = [n] + input_shape[2:]

        out, total_weight = _legacy_C_ops.nll_loss(input, label, weight,
                                                   'ignore_index', ignore_index,
                                                   'reduction', reduction)
        if input_dims != 2 and input_dims != 4 and reduction == 'none':
            out, _ = _legacy_C_ops.reshape2(out, None, 'shape', out_shape)
        return out

    helper = LayerHelper('nll_loss', **locals())

    if input_dims != 2 and input_dims != 4:
        input = reshape(input, shape=[n, c, 1, -1])
        label = reshape(label, shape=[n, 1, -1])
        out_shape = [n] + input_shape[2:]

    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'nll_loss')
    check_variable_and_dtype(label, 'label', ['int64'], 'nll_loss')
    inputs = {'X': input, 'Label': label}
    attrs = {'reduction': reduction, 'ignore_index': ignore_index}
    if weight is not None:
        if isinstance(weight, Variable):
            inputs['Weight'] = weight

    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    total_weight = helper.create_variable_for_type_inference(dtype=input.dtype)
    outputs = {'Out': out, 'Total_weight': total_weight}

    helper.append_op(type='nll_loss',
                     inputs=inputs,
                     outputs=outputs,
                     attrs=attrs)
    if input_dims != 2 and input_dims != 4 and reduction == 'none':
        out = reshape(out, shape=out_shape)

    return out


def kl_div(input, label, reduction='mean', name=None):
    r"""
    Calculate the Kullback-Leibler divergence loss
    between Input(X) and Input(Target). Note that Input(X) is the
    log-probability and Input(Target) is the probability.

    KL divergence loss is calculated as follows:

    .. math::

        l(x, y) = y * (\log(y) - x)

    Here :math:`x` is the input and :math:`y` is the label.

    While :attr:`reduction` is :attr:`none`, output loss is in
    the same shape as input, loss in each point is calculated
    separately and no reduction is applied.

    While :attr:`reduction` is :attr:`mean`, output loss is in
    shape of [1] and loss value is the mean value of all losses.

    While :attr:`reduction` is :attr:`sum`, output loss is in
    shape of [1] and loss value is the sum value of all losses.

    While :attr:`reduction` is :attr:`batchmean`, output loss is
    in shape of [1] and loss value is the sum value of all losses
    divided by batch size.

    Args:
        input (Tensor): The input tensor. The shape is [N, *], where N is batch size and `*` means
             any number of additional dimensions. Its data type should be float32, float64.
        label (Tensor): label. The shape is [N, *], same shape as ``input`` . Its data type should be float32, float64.
        reduction (str, optional): Indicate how to average the loss,
             the candidates are ``'none'`` | ``'batchmean'`` | ``'mean'`` | ``'sum'``.
             If `reduction` is ``'mean'``, the reduced mean loss is returned;
             If `reduction` is ``'batchmean'``, the sum loss divided by batch size is returned;
             if `reduction` is ``'sum'``, the reduced sum loss is returned;
             if `reduction` is ``'none'``, no reduction will be applied.
             Default is ``'mean'``.
        name(str, optional): Name for the operation (optional, default is None). For more information,
            please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The KL divergence loss. The data type is the same as the input tensor.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            shape = (5, 20)
            x = paddle.uniform(shape, min=-10, max=10).astype('float32')
            target = paddle.uniform(shape, min=-10, max=10).astype('float32')

            # 'batchmean' reduction, loss shape will be [1]
            pred_loss = F.kl_div(x, target, reduction='batchmean')
            # shape=[1]

            # 'mean' reduction, loss shape will be [1]
            pred_loss = F.kl_div(x, target, reduction='mean')
            # shape=[1]

            # 'sum' reduction, loss shape will be [1]
            pred_loss = F.kl_div(x, target, reduction='sum')
            # shape=[1]

            # 'none' reduction, loss shape is same with input shape
            pred_loss = F.kl_div(x, target, reduction='none')
            # shape=[5, 20]
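
            # A rough element-wise sketch (illustrative): for a positive,
            # probability-like target, reduction='none' follows
            # label * (log(label) - input) point by point.
            prob_target = paddle.nn.functional.softmax(target, axis=-1)
            manual = prob_target * (paddle.log(prob_target) - x)
            none_loss = F.kl_div(x, prob_target, reduction='none')
            # manual and none_loss should match element-wise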

    """
    # ugly type promotion
    if fluid.data_feeder.convert_dtype(
            input.dtype) == 'float32' and fluid.data_feeder.convert_dtype(
                label.dtype) == 'float64':
        input = paddle.cast(input, 'float64')
    elif fluid.data_feeder.convert_dtype(
            input.dtype) == 'float64' and fluid.data_feeder.convert_dtype(
                label.dtype) == 'float32':
        label = paddle.cast(label, 'float64')

    if in_dygraph_mode():
        out = _C_ops.kldiv_loss(input, label, 'none')
        if reduction == 'mean':
            out = paddle.mean(out)
        elif reduction == 'sum':
            out = paddle.sum(out)
        elif reduction == 'batchmean':
            if len(input.shape) > 0:
                batch_size = input.shape[0]
                out = paddle.sum(out) / batch_size
        return out
    elif _in_legacy_dygraph():
        out = _legacy_C_ops.kldiv_loss(input, label, 'reduction', 'none')
        if reduction == 'mean':
            out = paddle.mean(out)
        elif reduction == 'sum':
            out = paddle.sum(out)
        elif reduction == 'batchmean':
            if len(input.shape) > 0:
                batch_size = input.shape[0]
                out = paddle.sum(out) / batch_size
        return out

    helper = LayerHelper('kl_div', **locals())

    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'kl_div')
    check_variable_and_dtype(label, 'label', ['float32', 'float64'], 'kl_div')
    fluid.data_feeder.check_type(reduction, 'reduction', str, 'kl_div')

    loss = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(type='kldiv_loss',
                     inputs={
                         'X': input,
                         'Target': label
                     },
                     outputs={'Loss': loss},
                     attrs={'reduction': 'none'})

    if reduction == 'mean':
        loss = paddle.mean(loss)
    elif reduction == 'sum':
        loss = paddle.sum(loss)
    elif reduction == 'batchmean':
        batch_size = paddle.shape(input)[0]
        loss = paddle.sum(loss) / batch_size
    return loss


def mse_loss(input, label, reduction='mean', name=None):
    r"""
    Accepts input predictions and label and returns the mean square error.

    If :attr:`reduction` is set to ``'none'``, loss is calculated as:

    .. math::
        Out = (input - label)^2

    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:

    .. math::
        Out = \operatorname{mean}((input - label)^2)

    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:

    .. math::
        Out = \operatorname{sum}((input - label)^2)

    Parameters:
        input (Tensor): Input tensor, the data type should be float32 or float64.
        label (Tensor): Label tensor, the data type should be float32 or float64.
        reduction (string, optional): The reduction method for the output,
            could be 'none' | 'mean' | 'sum'.
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
            Default is ``'mean'``.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.


    Returns:
        Tensor, the tensor storing the mean square error difference of input and label.

    Examples:

        .. code-block:: python

            import paddle

            input = paddle.to_tensor(1.5)
            label = paddle.to_tensor(1.7)
            output = paddle.nn.functional.mse_loss(input, label)
            print(output)
            # [0.04000002]

    """

    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "'reduction' in 'mse_loss' should be 'sum', 'mean' or 'none', "
            "but received {}.".format(reduction))

    if not in_dynamic_mode():
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'mse_loss')
        check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                                 'mse_loss')

    if reduction == 'none':
        return paddle.square(paddle.subtract(input, label), name=name)
    elif reduction == 'mean':
        return paddle.mean(paddle.square(paddle.subtract(input, label)),
                           name=name)
    else:
        return paddle.sum(paddle.square(paddle.subtract(input, label)),
                          name=name)


def ctc_loss(log_probs,
             labels,
             input_lengths,
             label_lengths,
             blank=0,
             reduction='mean',
             norm_by_times=False):
    """

    An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc)
    to compute Connectionist Temporal Classification (CTC) loss.
    It can be aliased as softmax with CTC, since a native softmax activation
    is integrated into the Warp-CTC library to normalize values for each row of the input tensor.

    Parameters:
        log_probs (Tensor): The unscaled probability sequence with padding, which is a 3-D Tensor. The tensor shape is [max_logit_length, batch_size, num_classes + 1], where max_logit_length is the longest length of input logit sequence. The data type should be float32 or float64.
        labels (Tensor): The ground truth sequence with padding, which must be a 2-D Tensor. The tensor shape is [batch_size, max_label_length], where max_label_length is the longest length of label sequence. The data type must be int32.
        input_lengths (Tensor): The length for each input sequence, it should have shape [batch_size] and dtype int64.
        label_lengths (Tensor): The length for each label sequence, it should have shape [batch_size] and dtype int64.
        blank (int, optional): The blank label index of Connectionist Temporal Classification (CTC) loss, which is in the half-opened interval [0, num_classes + 1). The data type must be int32. Default is 0.
        reduction (string, optional): Indicate how to average the loss, the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the output loss will be divided by the label_lengths, and then return the mean of quotient; If :attr:`reduction` is ``'sum'``, return the sum of loss; If :attr:`reduction` is ``'none'``, no reduction will be applied. Default is ``'mean'``.
        norm_by_times (bool, optional): Whether to normalize the gradients by the number of time-steps, which is also the sequence length. There is no need to normalize the gradients if the reduction mode is 'mean'. Default is False.

    Returns:
        Tensor, The Connectionist Temporal Classification (CTC) loss between ``log_probs`` and  ``labels``. If :attr:`reduction` is ``'none'``, the shape of loss is [batch_size], otherwise, the shape of loss is [1]. Data type is the same as ``log_probs``.

    Examples:

        .. code-block:: python

            # declarative mode
            import paddle.nn.functional as F
            import numpy as np
            import paddle

            # length of the longest logit sequence
            max_seq_length = 4
            #length of the longest label sequence
            max_label_length = 3
            # number of logit sequences
            batch_size = 2
            # class num
            class_num = 3

            np.random.seed(1)
            log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
                                    [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],

                                    [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
                                    [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]],

                                    [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02],
                                    [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]],

                                    [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01],
                                    [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],

                                    [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
                                    [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
            labels = np.array([[1, 2, 2],
                            [1, 2, 2]]).astype("int32")
            input_lengths = np.array([5, 5]).astype("int64")
            label_lengths = np.array([3, 3]).astype("int64")

            log_probs = paddle.to_tensor(log_probs)
            labels = paddle.to_tensor(labels)
            input_lengths = paddle.to_tensor(input_lengths)
            label_lengths = paddle.to_tensor(label_lengths)

            loss = F.ctc_loss(log_probs, labels,
                input_lengths,
                label_lengths,
                blank=0,
                reduction='none')
            print(loss)  #[3.9179852 2.9076521]

            loss = F.ctc_loss(log_probs, labels,
                input_lengths,
                label_lengths,
                blank=0,
                reduction='mean')
            print(loss)  #[1.1376063]
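
            # A rough cross-check (illustrative): the 'mean' result equals the
            # per-sample 'none' losses divided by their label lengths, averaged.
            none_loss = F.ctc_loss(log_probs, labels, input_lengths,
                label_lengths, blank=0, reduction='none')
            manual = paddle.mean(none_loss / label_lengths.astype(none_loss.dtype))
            # manual should be close to the 'mean' value printed above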

    """

    loss_out = fluid.layers.warpctc(log_probs, labels, blank, norm_by_times,
                                    input_lengths, label_lengths)

    loss_out = paddle.squeeze(loss_out, [-1])
    assert reduction in ['mean', 'sum', 'none']
    if reduction == 'mean':
        loss_out = paddle.mean(loss_out / label_lengths)
    elif reduction == 'sum':
        loss_out = paddle.sum(loss_out)
    return loss_out


def margin_cross_entropy(logits,
                         label,
                         margin1=1.0,
                         margin2=0.5,
                         margin3=0.0,
                         scale=64.0,
                         group=None,
                         return_softmax=False,
                         reduction='mean'):
    r"""
    .. math::

        L=-\frac{1}{N}\sum^N_{i=1}\log\frac{e^{s(\cos(m_{1}\theta_{y_i}+m_{2})-m_{3})}}{e^{s(\cos(m_{1}\theta_{y_i}+m_{2})-m_{3})}+\sum^n_{j=1,j\neq y_i} e^{s\cos\theta_{j}}}

    where :math:`\theta_{y_i}` is the angle between the feature :math:`x` and
    the representation of class :math:`i`. The details of the ArcFace loss
    can be found at https://arxiv.org/abs/1801.07698.

    .. hint::
        The API supports single GPU and multi GPU, and does not support CPU.
        For data parallel mode, set ``group=False``.
        For model parallel mode, set ``group=None`` or the group instance returned by paddle.distributed.new_group.
        And logits.shape[-1] can be different at each rank.

    Args:
        logits (Tensor): shape[N, local_num_classes], the output of the normalized X multiplied by the normalized W.
                The logits is shard_logits when using model parallel.
        label (Tensor): shape[N] or shape[N, 1], the ground truth label.
        margin1 (float, optional): m1 of margin loss, default value is `1.0`.
        margin2 (float, optional): m2 of margin loss, default value is `0.5`.
        margin3 (float, optional): m3 of margin loss, default value is `0.0`.
        scale (float, optional): s of margin loss, default value is `64.0`.
        group (Group, optional): The group instance returned by paddle.distributed.new_group
            or ``None`` for the global default group, or ``False`` for data parallel (do not communicate across ranks).
            Default is ``None``.
        return_softmax (bool, optional): Whether return softmax probability. Default value is `False`.
        reduction (str, optional): The candicates are ``'none'`` | ``'mean'`` | ``'sum'``.
                    If :attr:`reduction` is ``'mean'``, return the average of loss;
                    If :attr:`reduction` is ``'sum'``, return the sum of loss;
                    If :attr:`reduction` is ``'none'``, no reduction will be applied.
                    Default value is `'mean'`.

    Returns:
        Tensor|tuple[Tensor, Tensor], return the cross entropy loss if
            `return_softmax` is False, otherwise the tuple (loss, softmax),
            softmax is shard_softmax when using model parallel, otherwise
            softmax is in the same shape with input logits. If
            ``reduction == None``, the shape of loss is ``[N, 1]``, otherwise
            the shape is ``[1]``.

    Examples:

    .. code-block:: python
        :name: code-example1

        # required: gpu
        # Single GPU
        import paddle
        m1 = 1.0
        m2 = 0.5
        m3 = 0.0
        s = 64.0
        batch_size = 2
        feature_length = 4
        num_classes = 4

        label = paddle.randint(low=0, high=num_classes, shape=[batch_size], dtype='int64')

        X = paddle.randn(
            shape=[batch_size, feature_length],
            dtype='float64')
        X_l2 = paddle.sqrt(paddle.sum(paddle.square(X), axis=1, keepdim=True))
        X = paddle.divide(X, X_l2)

        W = paddle.randn(
            shape=[feature_length, num_classes],
            dtype='float64')
        W_l2 = paddle.sqrt(paddle.sum(paddle.square(W), axis=0, keepdim=True))
        W = paddle.divide(W, W_l2)

        logits = paddle.matmul(X, W)
        loss, softmax = paddle.nn.functional.margin_cross_entropy(
            logits, label, margin1=m1, margin2=m2, margin3=m3, scale=s, return_softmax=True, reduction=None)

        print(logits)
        print(label)
        print(loss)
        print(softmax)

        #Tensor(shape=[2, 4], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
        #       [[ 0.85204151, -0.55557678,  0.04994566,  0.71986042],
        #        [-0.20198586, -0.35270476, -0.55182702,  0.09749021]])
        #Tensor(shape=[2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [2, 3])
        #Tensor(shape=[2, 1], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
        #       [[82.37059586],
        #        [12.13448420]])
        #Tensor(shape=[2, 4], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
        #       [[0.99978819, 0.00000000, 0.00000000, 0.00021181],
        #        [0.99992995, 0.00006468, 0.00000000, 0.00000537]])

    .. code-block:: python
        :name: code-example2

        # required: distributed
        # Multi GPU, test_margin_cross_entropy.py
        import paddle
        import paddle.distributed as dist
        strategy = dist.fleet.DistributedStrategy()
        dist.fleet.init(is_collective=True, strategy=strategy)
        rank_id = dist.get_rank()
        m1 = 1.0
        m2 = 0.5
        m3 = 0.0
        s = 64.0
        batch_size = 2
        feature_length = 4
        num_class_per_card = [4, 8]
        num_classes = paddle.sum(paddle.to_tensor(num_class_per_card))

        label = paddle.randint(low=0, high=num_classes.item(), shape=[batch_size], dtype='int64')
        label_list = []
        dist.all_gather(label_list, label)
        label = paddle.concat(label_list, axis=0)

        X = paddle.randn(
            shape=[batch_size, feature_length],
            dtype='float64')
        X_list = []
        dist.all_gather(X_list, X)
        X = paddle.concat(X_list, axis=0)
        X_l2 = paddle.sqrt(paddle.sum(paddle.square(X), axis=1, keepdim=True))
        X = paddle.divide(X, X_l2)

        W = paddle.randn(
            shape=[feature_length, num_class_per_card[rank_id]],
            dtype='float64')
        W_l2 = paddle.sqrt(paddle.sum(paddle.square(W), axis=0, keepdim=True))
        W = paddle.divide(W, W_l2)

        logits = paddle.matmul(X, W)
        loss, softmax = paddle.nn.functional.margin_cross_entropy(
            logits, label, margin1=m1, margin2=m2, margin3=m3, scale=s, return_softmax=True, reduction=None)

        print(logits)
        print(label)
        print(loss)
        print(softmax)

        # python -m paddle.distributed.launch --gpus=0,1 test_margin_cross_entropy.py
        ## for rank0 input
        #Tensor(shape=[4, 4], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
        #       [[ 0.32888934,  0.02408748, -0.02763289,  0.18173063],
        #        [-0.52893978, -0.10623845, -0.21596515, -0.06432517],
        #        [-0.00536345, -0.03924667,  0.66735314, -0.28640926],
        #        [-0.09907366, -0.48534973, -0.10365338, -0.39472322]])
        #Tensor(shape=[4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #       [11, 1 , 10, 11])

        ## for rank1 input
        #Tensor(shape=[4, 8], dtype=float64, place=CUDAPlace(1), stop_gradient=True,
        #       [[ 0.68654754,  0.28137170,  0.69694954, -0.60923933, -0.57077653,  0.54576703, -0.38709028,  0.56028204],
        #        [-0.80360371, -0.03042448, -0.45107338,  0.49559349,  0.69998950, -0.45411693,  0.61927630, -0.82808600],
        #        [ 0.11457570, -0.34785879, -0.68819499, -0.26189226, -0.48241491, -0.67685711,  0.06510185,  0.49660849],
        #        [ 0.31604851,  0.52087884,  0.53124749, -0.86176582, -0.43426329,  0.34786144, -0.10850784,  0.51566383]])
        #Tensor(shape=[4], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
        #       [11, 1 , 10, 11])

        ## for rank0 output
        #Tensor(shape=[4, 1], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
        #       [[38.96608230],
        #        [81.28152394],
        #        [69.67229865],
        #        [31.74197251]])
        #Tensor(shape=[4, 4], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
        #       [[0.00000000, 0.00000000, 0.00000000, 0.00000000],
        #        [0.00000000, 0.00000000, 0.00000000, 0.00000000],
        #        [0.00000000, 0.00000000, 0.99998205, 0.00000000],
        #        [0.00000000, 0.00000000, 0.00000000, 0.00000000]])
        ## for rank1 output
        #Tensor(shape=[4, 1], dtype=float64, place=CUDAPlace(1), stop_gradient=True,
        #       [[38.96608230],
        #        [81.28152394],
        #        [69.67229865],
        #        [31.74197251]])
        #Tensor(shape=[4, 8], dtype=float64, place=CUDAPlace(1), stop_gradient=True,
        #       [[0.33943993, 0.00000000, 0.66051859, 0.00000000, 0.00000000, 0.00004148, 0.00000000, 0.00000000],
        #        [0.00000000, 0.00000000, 0.00000000, 0.00000207, 0.99432097, 0.00000000, 0.00567696, 0.00000000],
        #        [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00001795],
        #        [0.00000069, 0.33993085, 0.66006319, 0.00000000, 0.00000000, 0.00000528, 0.00000000, 0.00000000]])
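
    A small sketch (illustrative, single GPU) of how the margins relate to plain
    softmax cross entropy: with ``margin1=1.0``, ``margin2=0.0`` and ``margin3=0.0``
    the formula reduces to softmax cross entropy on the scaled logits.

    .. code-block:: python

        # required: gpu
        import paddle
        import paddle.nn.functional as F

        logits = paddle.rand([4, 8], dtype='float64')
        label = paddle.randint(low=0, high=8, shape=[4], dtype='int64')
        scale = 64.0

        loss = paddle.nn.functional.margin_cross_entropy(
            logits, label, margin1=1.0, margin2=0.0, margin3=0.0,
            scale=scale, reduction='mean')
        reference = F.cross_entropy(scale * logits, label)
        # loss and reference should be approximately equal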
    """

    assert reduction in ['mean', 'sum', 'none', None]
    if not (group == False or group is None or hasattr(group, 'is_member')):
        raise ValueError(
            'Expected group is False, None or instance of paddle.distributed.collective.Group \
             (got group: {})'.format(group))
        return

    if hasattr(group, 'is_member') and not group.is_member():
        return

    ring_id = 0
    rank = 0
    nranks = 1
    if group != False:
        ring_id = 0 if group is None else group.id
        if core.is_compiled_with_dist():
            parallel_env = paddle.distributed.ParallelEnv()
            global_rank = parallel_env.rank
            rank = global_rank if group is None else group.get_group_rank(
                global_rank)
            nranks = parallel_env.world_size if group is None else group.nranks

    input_dims = len(list(logits.shape))
    label_dims = len(list(label.shape))
    if input_dims - 1 != label_dims and input_dims != label_dims:
        raise ValueError(
            'Expected input_dims - 1 = label_dims or input_dims == label_dims\
             (got input_dims{}, label_dims{})'.format(input_dims, label_dims))
    if input_dims - 1 == label_dims:
        label = paddle.unsqueeze(label, axis=-1)

    if in_dygraph_mode():
        softmax, loss = _C_ops.margin_cross_entropy(logits, label,
                                                    return_softmax, ring_id,
                                                    rank, nranks, margin1,
                                                    margin2, margin3, scale)
        if reduction == 'mean':
            loss = paddle.mean(loss)
        elif reduction == 'sum':
            loss = paddle.sum(loss)
        if not return_softmax:
            return loss
        else:
            return loss, softmax
    elif _in_legacy_dygraph():
        softmax, loss = _legacy_C_ops.margin_cross_entropy(
            logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks,
            'margin1', margin1, 'margin2', margin2, 'margin3', margin3, 'scale',
            scale, 'return_softmax', return_softmax)
        if reduction == 'mean':
            loss = paddle.mean(loss)
        elif reduction == 'sum':
            loss = paddle.sum(loss)
        if not return_softmax:
            return loss
        else:
            return loss, softmax

    op_type = 'margin_cross_entropy'
    helper = LayerHelper(op_type, **locals())
    softmax = helper.create_variable_for_type_inference(dtype=logits.dtype)
    loss = helper.create_variable_for_type_inference(dtype=logits.dtype)

    check_variable_and_dtype(logits, 'logits',
                             ['float16', 'float32', 'float64'],
                             'margin_cross_entropy')
    check_variable_and_dtype(label, 'label', ['int32', 'int64'],
                             'margin_cross_entropy')

    helper.append_op(type=op_type,
                     inputs={
                         'Logits': logits,
                         'Label': label
                     },
                     outputs={
                         'Softmax': softmax,
                         'Loss': loss
                     },
                     attrs={
                         'return_softmax': return_softmax,
                         'ring_id': ring_id,
                         'rank': rank,
                         'nranks': nranks,
                         'margin1': margin1,
                         'margin2': margin2,
                         'margin3': margin3,
                         'scale': scale,
                     })

    if reduction == 'mean':
        loss = paddle.mean(loss)
    elif reduction == 'sum':
        loss = paddle.sum(loss)

    if not return_softmax:
        return loss
    else:
        return loss, softmax


@deprecated(
    since="2.0.0",
    update_to="paddle.nn.functional.cross_entropy",
    level=1,
    reason=
    ('Please notice that behavior of "paddle.nn.functional.softmax_with_cross_entropy" '
     'and "paddle.nn.functional.cross_entropy" is different.'))
def softmax_with_cross_entropy(logits,
                               label,
                               soft_label=False,
                               ignore_index=-100,
                               numeric_stable_mode=True,
                               return_softmax=False,
                               axis=-1):
    r"""
    This operator implements the cross entropy loss function with softmax. This function
    combines the calculation of the softmax operation and the cross entropy loss function
    to provide a more numerically stable gradient.

    Because this operator performs a softmax on logits internally, it expects
    unscaled logits. This operator should not be used with the output of
    softmax operator since that would produce incorrect results.

    When the attribute :attr:`soft_label` is set :attr:`False`, this operator
    expects mutually exclusive hard labels, each sample in a batch is in exactly
    one class with a probability of 1.0. Each sample in the batch will have a
    single label.

    The equation is as follows:

    1) Hard label (one-hot label, so every sample has exactly one class)

    .. math::
        \\loss_j=-\text{logits}_{label_j} +\log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right), j = 1,..., K

    2) Soft label (each sample can have a distribution over all classes)

    .. math::
        \\loss_j= -\sum_{i=0}^{K}\text{label}_i\left(\text{logits}_i - \log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right)\right), j = 1,...,K

    3) If :attr:`numeric_stable_mode` is :attr:`True`, softmax is calculated first by:

    .. math::
        \\max_j&=\max_{i=0}^{K}{\text{logits}_i} \\
                log\_max\_sum_j &= \log\sum_{i=0}^{K}\exp(logits_i - max_j)\\
                softmax_j &= \exp(logits_j - max_j - {log\_max\_sum}_j)

    and then cross entropy loss is calculated by softmax and label.

    Args:
        logits (Tensor): A multi-dimension ``Tensor`` , and the data type is float32 or float64. The input tensor of unscaled log probabilities.
        label (Tensor): The ground truth  ``Tensor`` , data type is the same
            as the ``logits`` . If :attr:`soft_label` is set to :attr:`True`,
            Label is a ``Tensor`` in the same shape with :attr:`logits`.
            If :attr:`soft_label` is set to :attr:`False`, Label is a ``Tensor``
            in the same shape with :attr:`logits` except shape in dimension :attr:`axis` as 1.
        soft_label (bool, optional): A flag to indicate whether to interpret the given
            labels as soft labels. Default False.
        ignore_index (int, optional): Specifies a target value that is ignored and does
                                      not contribute to the input gradient. Only valid
                                      if :attr:`soft_label` is set to :attr:`False`.
                                      Default: kIgnoreIndex(-100).
        numeric_stable_mode (bool, optional): A flag to indicate whether to use a more
                                              numerically stable algorithm. Only valid
                                              when :attr:`soft_label` is :attr:`False`
                                              and GPU is used. When :attr:`soft_label`
                                              is :attr:`True` or CPU is used, the
                                              algorithm is always numerically stable.
                                              Note that the speed may be slower when use
                                              stable algorithm. Default: True.
        return_softmax (bool, optional): A flag indicating whether to return the softmax
                                         along with the cross entropy loss. Default: False.
        axis (int, optional): The index of dimension to perform softmax calculations. It
                              should be in range :math:`[-1, rank - 1]`, while :math:`rank`
                              is the rank of input :attr:`logits`. Default: -1.

    Returns:
        ``Tensor`` or Tuple of two ``Tensor`` : Return the cross entropy loss if \
                                                    `return_softmax` is False, otherwise the tuple \
                                                    (loss, softmax), softmax is in the same shape \
                                                    with input logits and cross entropy loss is in \
                                                    the same shape with input logits except shape \
                                                    in dimension :attr:`axis` as 1.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            data = np.random.rand(128).astype("float32")
            label = np.random.rand(1).astype("int64")
            data = paddle.to_tensor(data)
            label = paddle.to_tensor(label)
            linear = paddle.nn.Linear(128, 100)
            x = linear(data)
            out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
            print(out)
    """
    return fluid_softmax_with_cross_entropy(logits, label, soft_label,
                                            ignore_index, numeric_stable_mode,
                                            return_softmax, axis)


def cross_entropy(input,
                  label,
                  weight=None,
                  ignore_index=-100,
                  reduction='mean',
                  soft_label=False,
                  axis=-1,
                  use_softmax=True,
                  name=None):
    r"""
    By default, this operator implements the cross entropy loss function with softmax. This function
    combines the calculation of the softmax operation and the cross entropy loss function
    to provide a more numerically stable computing.

    This operator will calculate the cross entropy loss function without softmax when use_softmax=False.

    By default, this operator will calculate the mean of the result, and you can also affect
    the default behavior by using the reduction parameter. Please refer to the part of
    parameters for details.

    This operator can be used to calculate the softmax cross entropy loss with soft and hard labels.
    Here, the hard labels mean the actual label values, 0, 1, 2, etc., and the soft labels
    mean the probabilities of the actual labels, 0.6, 0.8, 0.2, etc.

    The calculation of this operator includes the following two steps.

    - **1.softmax cross entropy**

        1. Hard label (each sample can only be assigned into one category)

        1.1. when use_softmax=True

            .. math::
              \\loss_j=-\text{logits}_{label_j}+\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right) , j = 1,...,N

            where, N is the number of samples and C is the number of categories.

        1.2. when use_softmax=False

            .. math::
              \\loss_j=-\log\left({P}_{label_j}\right) , j = 1,...,N

            where, N is the number of samples and C is the number of categories, P is input(the output of softmax).


        2. Soft label (each sample is assigned to multiple categories with a certain probability, and the probability sum is 1).

        2.1. when use_softmax=True

            .. math::
              \\loss_j=-\sum_{i=0}^{C}\text{label}_i\left(\text{logits}_i-\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right)\right) , j = 1,...,N

            where, N is the number of samples and C is the number of categories.

        2.2. when use_softmax=False

            .. math::
              \\loss_j=-\sum_{j=0}^{C}\left({label}_j*\log\left({P}_{label_j}\right)\right) , j = 1,...,N

            where, N is the number of samples and C is the number of categories, P is input(the output of softmax).




    - **2. Weight and reduction processing**

        1. Weight

            If the ``weight`` parameter is ``None`` , go to the next step directly.

            If the ``weight`` parameter is not ``None`` , the cross entropy of each sample is weighted by weight
            according to soft_label = False or True as follows.

            1.1. Hard labels (soft_label = False)

            .. math::
                \\loss_j=loss_j*weight[label_j]


            1.2. Soft labels (soft_label = True)

             .. math::
                \\loss_j=loss_j*\sum_{i}\left(weight[label_i]*logits_i\right)

        2. reduction

            2.1 if the ``reduction`` parameter is ``none``

                Return the previous result directly

            2.2 if the ``reduction`` parameter is ``sum``

                Return the sum of the previous results

            .. math::
               \\loss=\sum_{j}loss_j

            2.3 if the ``reduction`` parameter is ``mean`` , it will be processed according to
            the ``weight`` parameter as follows.

            2.3.1. If the  ``weight``  parameter is ``None``

                   Return the average value of the previous results

            .. math::
                \\loss=\sum_{j}loss_j/N

                where, N is the number of samples and C is the number of categories.

            2.3.2. If the 'weight' parameter is not 'None', the weighted average value of the previous result will be returned

            1. Hard labels (soft_label = False)

            .. math::
                \\loss=\sum_{j}loss_j/\sum_{j}weight[label_j]

            2. Soft labels (soft_label = True)

            .. math::
                \\loss=\sum_{j}loss_j/\sum_{j}\left(\sum_{i}weight[label_i]\right)


    Parameters:

        - **input** (Tensor)

            Input tensor, the data type is float32, float64. Shape is
        :math:`[N_1, N_2, ..., N_k, C]`, where C is the number of classes, ``k >= 1`` .

            Note:

                1. when use_softmax=True, it expects unscaled logits. This operator should not be used with the
                output of softmax operator, which will produce incorrect results.

                2. when use_softmax=False, it expects the output of softmax operator.

        - **label** (Tensor)

            1. If soft_label=False, the shape is
            :math:`[N_1, N_2, ..., N_k]` or :math:`[N_1, N_2, ..., N_k, 1]`, k >= 1.
            the data type is int32, int64, float32, float64, where each value is [0, C-1].

            2. If soft_label=True, the shape and data type should be same with ``input`` ,
            and the sum of the labels for each sample should be 1.

        - **weight** (Tensor, optional)

            a manual rescaling weight given to each class.
            If given, has to be a Tensor of size C and the data type is float32, float64.
            Default is ``'None'`` .

        - **ignore_index** (int64, optional)

            Specifies a target value that is ignored
            and does not contribute to the loss. A negative value means that no label
            value needs to be ignored. Only valid when soft_label = False.
            Default is ``-100`` .

        - **reduction** (str, optional)

            Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
            Default is ``'mean'``.

        - **soft_label** (bool, optional)

            Indicate whether label is soft.
            Default is ``False``.

        - **axis** (int, optional)

            The index of dimension to perform softmax calculations.
            It should be in range :math:`[-1, rank - 1]`, where :math:`rank` is the
            number of dimensions of input :attr:`input`.
            Default is ``-1`` .

        - **use_softmax** (bool, optional)

            Indicate whether compute softmax before cross_entropy.
            Default is ``True``.

        - **name** (str, optional)

            The name of the operator. Default is ``None`` .
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:

        Tensor. Return the softmax cross_entropy loss of ``input`` and ``label``.
        The data type is the same as input.

        If :attr:`reduction` is ``'mean'`` or ``'sum'`` , the dimension of return value is ``1``.

        If :attr:`reduction` is ``'none'``:

        1. If soft_label = False, the dimension of return value is the same with ``label`` .

        2. if soft_label = True, the dimension of return value is :math:`[N_1, N_2, ..., N_k, 1]` .


    Examples:

        .. code-block:: python

            # hard labels
            import paddle
            paddle.seed(99999)
            N=100
            C=200
            reduction='mean'
            input =  paddle.rand([N, C], dtype='float64')
            label =  paddle.randint(0, C, shape=[N], dtype='int64')
            weight = paddle.rand([C], dtype='float64')

            cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                weight=weight, reduction=reduction)
            dy_ret = cross_entropy_loss(
                                       input,
                                       label)
            print(dy_ret.numpy()) #[5.41993642]
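
            # A rough cross-check (illustrative) of the weighted 'mean' reduction:
            # it equals sum(loss_i) / sum(weight[label_i]), where the unreduced
            # losses are already scaled by weight[label_i].
            unreduced = paddle.nn.functional.cross_entropy(
                input, label, weight=weight, reduction='none')
            manual = paddle.sum(unreduced) / paddle.sum(paddle.gather(weight, label))
            # manual should match dy_ret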

        .. code-block:: python

            # soft labels
            import paddle
            paddle.seed(99999)
            axis = -1
            ignore_index = -100
            N = 4
            C = 3
            shape = [N, C]
            reduction='mean'
            weight = None
            logits = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
            labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
            labels /= paddle.sum(labels, axis=axis, keepdim=True)
            paddle_loss_mean = paddle.nn.functional.cross_entropy(
2344 2345 2346
                                                                  logits,
                                                                  labels,
                                                                  soft_label=True,
2347 2348 2349 2350
                                                                  axis=axis,
                                                                  weight=weight,
                                                                  reduction=reduction)
            print(paddle_loss_mean.numpy()) #[1.12908343]
C
Chen Long 已提交
2351

2352 2353 2354 2355
    """

    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in softmax_cross_entropy"
            " should be 'sum', 'mean' or 'none', but received %s, which is not allowed."
            % reduction)
    if ignore_index > 0 and soft_label == True:
        raise ValueError(
            "When soft_label == True, the value of 'ignore_index' in softmax_cross_entropy"
            " should be '-100', but received %s, which is not allowed." %
            ignore_index)

    input_dims = len(list(input.shape))
    if input_dims == 0:
        raise ValueError('The dimension of input should be larger than zero!')

    label_dims = len(list(label.shape))
    if input_dims - 1 != label_dims and input_dims != label_dims:
        raise ValueError(
            'Expected input_dims - 1 = label_dims or input_dims == label_dims\
             (got input_dims{}, label_dims{})'.format(input_dims, label_dims))
    if input_dims - 1 == label_dims:
        # Hard labels lack the class axis; insert it so the label's rank matches
        # the input's rank for the shape-sensitive ops below.
        label = paddle.unsqueeze(label, axis=axis)

    if in_dygraph_mode():
        if soft_label == False:
            valid_label = paddle.cast(label != ignore_index,
                                      dtype=label.dtype) * label
        if core.is_compiled_with_npu() or core.is_compiled_with_mlu():
            if soft_label == False:
                _, _, out = _legacy_C_ops.softmax_with_cross_entropy(
                    input, valid_label, 'soft_label', soft_label,
                    'ignore_index', ignore_index, 'numeric_stable_mode', True,
                    'axis', axis, 'use_softmax', use_softmax)
            else:
                _, _, out = _legacy_C_ops.softmax_with_cross_entropy(
                    input, label, 'soft_label', soft_label, 'ignore_index',
                    ignore_index, 'numeric_stable_mode', True, 'axis', axis,
                    'use_softmax', use_softmax)
        else:
            _, out = _C_ops.cross_entropy_with_softmax(input, label, soft_label,
                                                       use_softmax, True,
                                                       ignore_index, axis)

        if weight is not None:

            # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
            if soft_label == True:
                # chajchaj:
                # weight's shape is C, where C is class num.
                # for 1d case: label's shape is [N,C], weight_gather's shape is N.
                # for 2d case: label's shape is [N,H,W,C], weight_gather's shape is [N,H,W].
                weight_gather = paddle.matmul(x=paddle.cast(
                    label, weight.dtype),
                                              y=weight,
                                              transpose_x=False,
                                              transpose_y=True)
                out_shape = list(out.shape)
                weight_gather_reshape = reshape(weight_gather, shape=out_shape)
                out = paddle.cast(out, weight_gather_reshape.dtype)

                out = _C_ops.multiply(out, weight_gather_reshape)
            else:
                if input.shape[axis] != weight.shape[-1]:
                    raise ValueError(
                        "input's class_dimension({}) must equal to "
                        "weight's class_dimension({}) "
                        "when weight is provided" \
                            .format(input.shape[axis], weight.shape[-1]))

                ignore_weight_mask = paddle.cast((label != ignore_index),
                                                 out.dtype)
                if ignore_weight_mask.ndim > 1 and ignore_weight_mask.shape[
                        axis] == 1:
                    # TODO: Temporarily use squeeze instead of squeeze_
                    ignore_weight_mask = paddle.squeeze(ignore_weight_mask,
                                                        axis)
                if axis != -1 and axis != valid_label.ndim - 1:
                    temp_perm = list(range(axis % valid_label.ndim)) \
                                + list(range((axis % valid_label.ndim + 1), valid_label.ndim)) \
                                + [axis % valid_label.ndim]
                    weight_gather = _C_ops.gather_nd(
                        weight, valid_label.transpose(temp_perm))
                else:
                    weight_gather = _C_ops.gather_nd(weight, valid_label)
                weight_gather = _C_ops.multiply(weight_gather,
                                                ignore_weight_mask)
                input_shape = list(label.shape)
                weight_gather_reshape = reshape(weight_gather,
                                                shape=input_shape)
                out = paddle.cast(out, weight_gather_reshape.dtype)
                out = _C_ops.multiply(out, weight_gather_reshape)

        if reduction == "sum":
            #   because of fluid_softmax_with_cross_entropy op's inner logic,
            #   in the out tensor of this op, the loss of sample with class_index==ignore_index is 0
            #   so, reduce_sum all directly is ok
            return _C_ops.sum(out, [], None, False)
        elif reduction == "mean":
            # 1. if weight is None,
            #     numerator: reduce_sum all loss directly is ok because of fluid_softmax_with_cross_entropy's inner logic
            #     denominator: count sample num with class_index!=ignore_index
            # 2. else
            #     numerator: loss's weighted sum
            #     denominator: calculate the sum of weight where the sample's class_index!=ignore_index
            if ignore_index >= 0:
                out_sum = _C_ops.sum(out, [], None, False)
                # for each label[i],set 1 or 0, according to ignore_index
                # mask[i]=0, if label[i]==ignore_index
                # mask[i]=1, otherwise
                mask = (label != ignore_index)
                if weight is None:
                    mask = paddle.cast(mask, dtype=out_sum.dtype)
                    count = _C_ops.sum(mask, [], None, False)
                    ret = out_sum / (count + (count == 0.0))
                else:
                    mask = paddle.cast(mask, weight_gather_reshape.dtype)
                    weight_ignored = _C_ops.multiply(mask,
                                                     weight_gather_reshape)
                    weight_sum = _C_ops.sum(weight_ignored, [], None, False)
                    ret = out_sum / (weight_sum + (weight_sum == 0.0))
                return ret
            elif weight is not None:
                out_sum = _C_ops.sum(out, [], None, False)
                total_weight = _C_ops.sum(weight_gather_reshape, [], None,
                                          False)
                return out_sum / (total_weight + (total_weight == 0.0))
            else:
                return _C_ops.mean_all(out)

        else:
            if input_dims - 1 == label_dims:
                out = paddle.squeeze(out, axis=axis)
            return out

    elif _in_legacy_dygraph():
        if soft_label == False:
            valid_label = paddle.cast(label != ignore_index,
                                      dtype=label.dtype) * label
            label_min = paddle.min(valid_label)
            label_max = paddle.max(valid_label)
            if label_min < 0:
                raise ValueError("Target {} is out of lower bound.".format(
                    label_min.item()))
            if label_max >= input.shape[axis]:
                raise ValueError("Target {} is out of upper bound.".format(
                    label_max.item()))
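            # Non-ignored hard labels must lie in [0, C) along `axis`; the
            # min/max check above surfaces out-of-range targets eagerly.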
        if core.is_compiled_with_npu() or core.is_compiled_with_mlu():
            if soft_label == False:
                _, _, out = _legacy_C_ops.softmax_with_cross_entropy(
                    input, valid_label, 'soft_label', soft_label,
                    'ignore_index', ignore_index, 'numeric_stable_mode', True,
                    'axis', axis, 'use_softmax', use_softmax)
            else:
                _, _, out = _legacy_C_ops.softmax_with_cross_entropy(
                    input, label, 'soft_label', soft_label, 'ignore_index',
                    ignore_index, 'numeric_stable_mode', True, 'axis', axis,
                    'use_softmax', use_softmax)
        else:
            _, out = _legacy_C_ops.softmax_with_cross_entropy(
                input, label, 'soft_label', soft_label, 'ignore_index',
                ignore_index, 'numeric_stable_mode', True, 'axis', axis,
                'use_softmax', use_softmax)

        if weight is not None:

            # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
            if soft_label == True:
                # chajchaj:
                # weight's shape is C, where C is class num.
                # for 1d case: label's shape is [N,C], weight_gather's shape is N.
                # for 2d case: label's shape is [N,H,W,C], weight_gather's shape is [N,H,W].
                weight_gather = paddle.matmul(x=paddle.cast(
                    label, weight.dtype),
                                              y=weight,
                                              transpose_x=False,
                                              transpose_y=True)
                out_shape = list(out.shape)
                weight_gather_reshape = reshape(weight_gather, shape=out_shape)
                out = paddle.cast(out, weight_gather_reshape.dtype)

                out = _legacy_C_ops.elementwise_mul(out, weight_gather_reshape)

            else:
                if input.shape[axis] != weight.shape[-1]:
                    raise ValueError(
                        "input's class_dimension({}) must equal to "
                        "weight's class_dimension({}) "
                        "when weight is provided" \
                            .format(input.shape[axis], weight.shape[-1]))

                ignore_weight_mask = paddle.cast((label != ignore_index),
                                                 out.dtype)
                if ignore_weight_mask.ndim > 1 and ignore_weight_mask.shape[
                        axis] == 1:
                    # TODO: Temporarily use squeeze instead of squeeze_
                    ignore_weight_mask = paddle.squeeze(ignore_weight_mask,
                                                        axis)
                if axis != -1 and axis != valid_label.ndim - 1:
                    temp_perm = list(range(axis % valid_label.ndim)) \
                                + list(range((axis % valid_label.ndim + 1), valid_label.ndim)) \
                                + [axis % valid_label.ndim]
                    weight_gather = _legacy_C_ops.gather_nd(
                        weight, valid_label.transpose(temp_perm))
                else:
                    weight_gather = _legacy_C_ops.gather_nd(weight, valid_label)
                weight_gather = _legacy_C_ops.elementwise_mul(
                    weight_gather, ignore_weight_mask)
                input_shape = list(label.shape)
                weight_gather_reshape = reshape(weight_gather,
                                                shape=input_shape)
                out = paddle.cast(out, weight_gather_reshape.dtype)
                out = _legacy_C_ops.elementwise_mul(out, weight_gather_reshape)

        if reduction == "sum":
            #   because of fluid_softmax_with_cross_entropy op's inner logic,
            #   in the out tensor of this op, the loss of sample with class_index==ignore_index is 0
            #   so, reduce_sum all directly is ok
            return _legacy_C_ops.reduce_sum(out, 'reduce_all', True)
        elif reduction == "mean":
            # 1. if weight is None,
            #     numerator: reduce_sum all loss directly is ok because of fluid_softmax_with_cross_entropy's inner logic
            #     denominator: count sample num with class_index!=ignore_index
            # 2. else
            #     numerator: loss's weighted sum
            #     denominator: calculate the sum of weight where the sample's class_index!=ignore_index
            if ignore_index >= 0:
                out_sum = _legacy_C_ops.reduce_sum(out, 'reduce_all', True)
                # for each label[i],set 1 or 0, according to ignore_index
                # mask[i]=0, if label[i]==ignore_index
                # mask[i]=1, otherwise
                mask = (label != ignore_index)
                if weight is None:
                    mask = paddle.cast(mask, dtype=out_sum.dtype)
                    count = _legacy_C_ops.reduce_sum(mask, 'reduce_all', True)
                    ret = out_sum / (count + (count == 0.0))
                else:
                    mask = paddle.cast(mask, weight_gather_reshape.dtype)
                    weight_ignored = _legacy_C_ops.elementwise_mul(
                        mask, weight_gather_reshape)
                    weight_sum = _legacy_C_ops.reduce_sum(
                        weight_ignored, 'reduce_all', True)
                    ret = out_sum / (weight_sum + (weight_sum == 0.0))
                return ret
            elif weight is not None:
                out_sum = _legacy_C_ops.reduce_sum(out, 'reduce_all', True)
                total_weight = _legacy_C_ops.reduce_sum(weight_gather_reshape,
                                                        'reduce_all', True)
                return out_sum / (total_weight + (total_weight == 0.0))
            else:
                return _legacy_C_ops.mean(out)
        else:
            if input_dims - 1 == label_dims:
                out = paddle.squeeze(out, axis=axis)
            return out

    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'softmax_cross_entropy')
    check_variable_and_dtype(
        label, 'label',
        ['uint8', 'int8', 'int16', 'int32', 'int64', 'float32', 'float64'],
        'softmax_cross_entropy')
    attrs = {
        'soft_label': soft_label,
        'ignore_index': ignore_index,
        'numeric_stable_mode': True,
        'axis': axis,
        'use_softmax': use_softmax
    }
    helper = LayerHelper('softmax_with_cross_entropy', **locals())
    softmax = helper.create_variable_for_type_inference(dtype=input.dtype)
    out = helper.create_variable_for_type_inference(dtype=input.dtype)

    outputs = {'Softmax': softmax, 'Loss': out}
    if core.is_compiled_with_npu() or core.is_compiled_with_mlu():
        backprop = helper.create_variable_for_type_inference(dtype=input.dtype)
        outputs['Backprop'] = backprop
    helper.append_op(type='softmax_with_cross_entropy',
                     inputs={
                         'Logits': input,
                         'Label': label
                     },
                     outputs=outputs,
                     attrs=attrs)

    if weight is not None:
        check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                                 'softmax_cross_entropy')
        weight_name = name if reduction == 'none' else None
        if soft_label == True:
            # chajchaj:
            # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
            # weight's shape is C, where C is class num.
            # for 1d case: label's shape is [N,C], weight_gather's shape is N.
            # for 2d case: label's shape is [N,H,W,C], weight_gather's shape is [N,H,W].
            weight_gather = paddle.matmul(x=paddle.cast(label, weight.dtype),
                                          y=weight,
                                          transpose_x=False,
                                          transpose_y=True)

            out_shape = list(out.shape)
            weight_gather_reshape = reshape(weight_gather, shape=out_shape)
            out = paddle.cast(out, weight_gather_reshape.dtype)
        else:
            if input.shape[axis] != weight.shape[-1]:
                raise ValueError("input's class_dimension({}) must equal to "
                                 "weight's class_dimension({}) "
                                 "when weight is provided" \
                                 .format(input.shape[axis], weight.shape[-1]))

            valid_label = paddle.multiply(
                paddle.cast(label != ignore_index, dtype=label.dtype), label)
            ignore_weight_mask = paddle.cast((label != ignore_index),
                                             input.dtype)
            if ignore_weight_mask.ndim > 1 and ignore_weight_mask.shape[
                    axis] == 1:
                ignore_weight_mask = paddle.squeeze(ignore_weight_mask, axis)
            if axis != -1 and axis != valid_label.ndim - 1:
                temp_perm = list(range(axis % valid_label.ndim)) \
                            + list(range((axis % valid_label.ndim + 1), valid_label.ndim)) \
                            + [axis % valid_label.ndim]
                weight_gather = paddle.gather_nd(
                    weight, paddle.transpose(valid_label, temp_perm))
            else:
                weight_gather = paddle.gather_nd(weight, valid_label)
            weight_gather = paddle.multiply(weight_gather, ignore_weight_mask)

            input_shape = list(label.shape)
            weight_gather_reshape = reshape(weight_gather, shape=input_shape)
        out = paddle.multiply(out, weight_gather_reshape, name=weight_name)

    if reduction == "sum":
        return paddle.sum(out, name=name)
    elif reduction == "mean":
        if ignore_index >= 0:
            out_sum = paddle.sum(out, name=name)
            # for each label[i],set 1 or 0, according to ignore_index
            # mask[i]=0, if label[i]==ignore_index
            # mask[i]=1, otherwise
            mask = (label != ignore_index)
            if (weight is None):
                mask = paddle.cast(mask, dtype=out_sum.dtype)
                count = paddle.sum(mask, name=name)
                ret = out_sum / (count + (count == 0.0))
            else:
                mask = paddle.cast(mask, weight_gather_reshape.dtype)
                weight_ignored = paddle.multiply(mask, weight_gather_reshape)
                weight_sum = paddle.sum(weight_ignored, name=name)
                ret = out_sum / (weight_sum + (weight_sum == 0.0))
            return ret
        elif weight is not None:
            out_sum = paddle.sum(out, name=name)
            total_weight = paddle.sum(weight_gather_reshape)
            return out_sum / (total_weight + (total_weight == 0.0))
        else:
            return paddle.mean(out, name=name)

    else:
        if input_dims - 1 == label_dims:
            out = paddle.squeeze(out, axis=axis)

        return out


def sigmoid_focal_loss(logit,
                       label,
                       normalizer=None,
                       alpha=0.25,
                       gamma=2.0,
                       reduction='sum',
                       name=None):
    r"""
    `Focal Loss <https://arxiv.org/abs/1708.02002>`_ is proposed to address the
    foreground-background class imbalance for classification tasks. It down-weights
    easily-classified examples and thus focuses training on hard examples. For example,
    it is used in one-stage object detection where the foreground-background class
    imbalance is extremely high.

    This operator measures the focal loss function as follows:

    .. math::
           Out = -Labels * alpha * {(1 - \sigma(Logit))}^{gamma}\log(\sigma(Logit)) - (1 - Labels) * (1 - alpha) * {\sigma(Logit)}^{gamma}\log(1 - \sigma(Logit))

    We know that :math:`\sigma(Logit) = \frac{1}{1 + \exp(-Logit)}`.

    Then, if :attr:`normalizer` is not None, this operator divides the loss `Out`
    by the normalizer tensor:

    .. math::
           Out = \frac{Out}{normalizer}

    Finally, this operator applies the reduce operation on the loss.
    If :attr:`reduction` is set to ``'none'``, the operator will return the original loss `Out`.
    If :attr:`reduction` is set to ``'mean'``, the reduced mean loss is :math:`Out = MEAN(Out)`.
    If :attr:`reduction` is set to ``'sum'``, the reduced sum loss is :math:`Out = SUM(Out)`.

    Note that the target ``label`` is 0 for the negative class and is 1 for the positive class.

    Args:
        logit (Tensor): The input logit tensor. The shape is [N, *], where N is batch_size,
            `*` means any number of additional dimensions. The ``logit`` is usually the
            output of a convolution layer. Available dtype is float32, float64.
        label (Tensor): The target label tensor with the same shape as
            ``logit``. The target label whose value should be numbers between 0 and 1.
            Available dtype is float32, float64.
        normalizer (Tensor, optional): The number normalizes the focal loss. It has to be
            a 1-D Tensor whose shape is `[1, ]`. The data type is float32, float64.
            For object detection task, it is the number of positive samples.
            If set to None, the focal loss will not be normalized. Default is None.
        alpha(int|float, optional): Hyper-parameter to balance the positive and negative example,
            it should be between 0 and 1.  Default value is set to 0.25.
        gamma(int|float, optional): Hyper-parameter to modulate the easy and hard examples.
            Default value is set to 2.0.
        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default is ``'sum'``.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, if :attr:`reduction` is ``'mean'`` or ``'sum'``, the out shape is :math:`[1]`, otherwise the shape is the same as ``logit``. The same dtype as ``logit`` tensor.

    Examples:

        .. code-block:: python

            import paddle

            logit = paddle.to_tensor([[0.97, 0.91, 0.03], [0.55, 0.43, 0.71]], dtype='float32')
            label = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype='float32')
            one = paddle.to_tensor([1.], dtype='float32')
            fg_label = paddle.greater_equal(label, one)
            fg_num = paddle.sum(paddle.cast(fg_label, dtype='float32'))
            output = paddle.nn.functional.sigmoid_focal_loss(logit, label, normalizer=fg_num)
            print(output)  # [0.65782464]

    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in sigmoid_focal_loss "
            "should be 'sum', 'mean' or 'none', but received %s, which is not allowed."
            % reduction)

    if normalizer is not None:
        check_variable_and_dtype(normalizer, 'normalizer',
                                 ['float32', 'float64'], 'sigmoid_focal_loss')
        normalizer_shape = list(normalizer.shape)
        normalizer_dims = len(normalizer_shape)
        if normalizer_dims > 1:
            raise ValueError(
                "Expected one dimension of normalizer in sigmoid_focal_loss but got {}."
                .format(normalizer_dims))

    if in_dygraph_mode():
        place = _current_expected_place()
        one = _C_ops.full(logit.shape, float(1.0), logit.dtype, place)

        loss = _C_ops.sigmoid_cross_entropy_with_logits(logit, label, False,
                                                        -100)

        pred = _C_ops.sigmoid(logit)

        p_t = _C_ops.add(
            _C_ops.multiply(pred, label),
            _C_ops.multiply(_C_ops.subtract(one, pred),
                            _C_ops.subtract(one, label)))

        alpha = fluid.dygraph.base.to_variable([alpha], dtype=loss.dtype)
        alpha_t = _C_ops.add(
            _C_ops.multiply(alpha, label),
            _C_ops.multiply(_C_ops.subtract(one, alpha),
                            _C_ops.subtract(one, label)))
        loss = _C_ops.multiply(alpha_t, loss)

        gamma = fluid.dygraph.base.to_variable([gamma], dtype=loss.dtype)
        gamma_t = _C_ops.pow(_C_ops.subtract(one, p_t), gamma)
        loss = _C_ops.multiply(gamma_t, loss)

        if normalizer is not None:
            loss = _C_ops.divide(loss, normalizer)

        if reduction == "sum":
            return _C_ops.sum(loss, [], None, False)
        elif reduction == "mean":
            return _C_ops.mean_all(loss)

        return loss

    elif _in_legacy_dygraph():
        one = _varbase_creator(dtype=logit.dtype)
        _legacy_C_ops.fill_constant(one, 'value', float(1.0), 'force_cpu',
                                    False, 'dtype', one.dtype, 'str_value',
                                    '1.0', 'shape', logit.shape)
        loss = _legacy_C_ops.sigmoid_cross_entropy_with_logits(logit, label)

        pred = _legacy_C_ops.sigmoid(logit)

        p_t = _legacy_C_ops.elementwise_add(
            _legacy_C_ops.elementwise_mul(pred, label),
            _legacy_C_ops.elementwise_mul(
                _legacy_C_ops.elementwise_sub(one, pred),
                _legacy_C_ops.elementwise_sub(one, label)))

        alpha = fluid.dygraph.base.to_variable([alpha], dtype=loss.dtype)
        alpha_t = _legacy_C_ops.elementwise_add(
            _legacy_C_ops.elementwise_mul(alpha, label),
            _legacy_C_ops.elementwise_mul(
                _legacy_C_ops.elementwise_sub(one, alpha),
                _legacy_C_ops.elementwise_sub(one, label)))
        loss = _legacy_C_ops.elementwise_mul(alpha_t, loss)

        gamma = fluid.dygraph.base.to_variable([gamma], dtype=loss.dtype)
        gamma_t = _legacy_C_ops.elementwise_pow(
            _legacy_C_ops.elementwise_sub(one, p_t), gamma)
        loss = _legacy_C_ops.elementwise_mul(gamma_t, loss)

        if normalizer is not None:
            loss = _legacy_C_ops.elementwise_div(loss, normalizer)

        if reduction == "sum":
            return _legacy_C_ops.reduce_sum(loss, 'reduce_all', True)
        elif reduction == "mean":
            return _legacy_C_ops.mean(loss)

        return loss

    check_variable_and_dtype(logit, 'logit', ['float32', 'float64'],
                             'sigmoid_focal_loss')
    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'sigmoid_focal_loss')

    bce_name = None
    if reduction == 'none' and normalizer is None:
        bce_name = name
    loss = paddle.nn.functional.binary_cross_entropy_with_logits(
        logit, label, reduction='none', name=bce_name)

    pred = paddle.nn.functional.sigmoid(logit)
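    # p_t below is the probability the model assigns to the ground-truth class:
    # pred where label is 1 and (1 - pred) where label is 0.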
    p_t = pred * label + (1 - pred) * (1 - label)

    alpha_t = alpha * label + (1 - alpha) * (1 - label)
    loss = paddle.multiply(alpha_t, loss)

    gamma_t = paddle.pow((1 - p_t), gamma)
    loss = paddle.multiply(gamma_t, loss)

    if normalizer is not None:
        normalizer_name = name if reduction == 'none' else None
        loss = paddle.divide(loss, normalizer, name=normalizer_name)

    if reduction == 'mean':
        loss = paddle.mean(loss, name=name)
    elif reduction == 'sum':
        loss = paddle.sum(loss, name=name)

    return loss


def multi_label_soft_margin_loss(input,
                                 label,
                                 weight=None,
                                 reduction="mean",
                                 name=None):
    r"""
    Calculate the multi-label soft margin loss, a one-versus-all binary cross entropy
    between input :math:`x` (a 2D mini-batch `Tensor` of logits)
    and target :math:`y` (a 2D `Tensor` of the same shape as :math:`x`).
    For each sample in the mini-batch:

    .. math::
        \text{loss}(x, y) = - \frac{1}{C} \sum_{i} \left( y[i] \log\big(\sigma(x[i])\big) + (1 - y[i]) \log\big(1 - \sigma(x[i])\big) \right)

    where :math:`C` is the number of classes, :math:`\sigma` is the sigmoid function,
    and :math:`x` and :math:`y` must have the same shape.
    Parameters:
        input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1.
        label (Tensor): Label tensor, the data type is float32 or float64. The shape of label is the same as the shape of input.
        weight (Tensor,optional): a manual rescaling weight given to each class.
                If given, has to be a Tensor of size C and the data type is float32, float64.
                Default is ``'None'`` .
        reduction (str, optional): Indicate how to average the loss by batch_size,
                the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
                If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
                If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
                If :attr:`reduction` is ``'sum'``, the summed loss is returned.
                Default: ``'mean'``
        name (str, optional): Name for the operation (optional, default is None).
                For more information, please refer to :ref:`api_guide_Name`.
    Shape:
        input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operation operates over all the elements.
        label: N-D Tensor, same shape as the input.
        weight: N-D Tensor, the shape is [N, 1].
        output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.
    Returns:
        Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label.
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
            # label elements in {1., -1.}
            label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
            loss = F.multi_label_soft_margin_loss(input, label, reduction='none')
            print(loss)
            # Tensor([3.49625897, 0.71111226, 0.43989015])
            loss = F.multi_label_soft_margin_loss(input, label, reduction='mean')
            print(loss)
            # Tensor([1.54908717])
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "'reduction' in 'multi_label_soft_margin_loss' should be 'sum', 'mean' or 'none', "
            "but received {}.".format(reduction))

    if not (input.shape == label.shape):
        raise ValueError("The input and label should have same dimension,"
                         "but received {}!={}".format(input.shape, label.shape))

    if not _non_static_mode():
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'multilabel_soft_margin_loss')
        check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                                 'multilabel_soft_margin_loss')

    loss = -(label * paddle.nn.functional.log_sigmoid(input) +
             (1 - label) * paddle.nn.functional.log_sigmoid(-input))
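    # This is per-class binary cross entropy with logits; log_sigmoid(-input)
    # equals log(1 - sigmoid(input)) and is the numerically stable form.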

    if weight is not None:
        if not _non_static_mode():
            check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                                     'multilabel_soft_margin_loss')
        loss = loss * weight

    loss = loss.mean(axis=-1)  # only return N loss values

    if reduction == "none":
        return loss
    elif reduction == "mean":
        return paddle.mean(loss)
    elif reduction == "sum":
        return paddle.sum(loss)


def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None):
    r"""
    Calculates hinge_embedding_loss. Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y` (containing 1 or -1).
    This is usually used for measuring whether two inputs are similar or dissimilar, e.g. using the L1 pairwise distance as :math:`x`,
    and is typically used for learning nonlinear embeddings or semi-supervised learning.

    The loss function for :math:`n`-th sample in the mini-batch is

    .. math::
        l_n = \begin{cases}
            x_n, & \text{if}\; y_n = 1,\\
            \max \{0, \Delta - x_n\}, & \text{if}\; y_n = -1,
        \end{cases}

    and the total loss functions is

    .. math::
        \ell(x, y) = \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{'sum'.}
        \end{cases}

    where :math:`L = \{l_1,\dots,l_N\}^\top`.

    Parameters:
        input (Tensor): Input tensor, the data type is float32 or float64.
            the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
        label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64.
            The shape of label is the same as the shape of input.
        margin (float, optional): Specifies the hyperparameter margin to be used.
            The value determines how large the input needs to be before it stops
            contributing to the loss. When label is -1, inputs smaller than the margin are penalized.
            Default = 1.0
        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default: ``'mean'``
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:

        input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64. The sum operation operates over all the elements.

        label: N-D Tensor, same shape as the input. tensor elements should containing 1 or -1, the data type is float32 or float64.

        output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.

    Returns:
        Tensor. The tensor variable storing the hinge_embedding_loss of input and label.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
            # label elements in {1., -1.}
            label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

            loss = F.hinge_embedding_loss(input, label, margin=1.0, reduction='none')
            print(loss)
            # Tensor([[0., -2., 0.],
            #         [0., -1., 2.],
            #         [1., 1., 1.]])

            loss = F.hinge_embedding_loss(input, label, margin=1.0, reduction='mean')
            print(loss)
            # Tensor([0.22222222])
    """

    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "'reduction' in 'hinge_embedding_loss' should be 'sum', 'mean' or 'none', "
            "but received {}.".format(reduction))

    if not _non_static_mode():
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'hinge_embedding_loss')
        check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                                 'hinge_embedding_loss')

    zero_ = paddle.zeros([1], dtype=input.dtype)
    loss = paddle.where(label == 1., input, zero_) + \
           paddle.where(label == -1., paddle.nn.functional.relu(margin - input), zero_)
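    # Where label == 1 the raw input is kept; where label == -1 the hinge
    # max(0, margin - input) applies; any other label value contributes zero.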

    if reduction == 'mean':
        return paddle.mean(loss, name=name)
    elif reduction == 'sum':
        return paddle.sum(loss, name=name)
    elif reduction == 'none':
        return loss


def cosine_embedding_loss(input1,
                          input2,
                          label,
                          margin=0,
                          reduction='mean',
                          name=None):
    r"""
    This operator computes the cosine embedding loss of Tensor ``input1``, ``input2`` and ``label`` as follows.

    If label = 1, then the loss value can be calculated as follows:

    .. math::
        Out = 1 - cos(input1, input2)

    If label = -1, then the loss value can be calculated as follows:

    .. math::
        Out = max(0, cos(input1, input2)) - margin

    The operator cos can be described as follows:

    .. math::
        cos(x1, x2) = \frac{x1 \cdot{} x2}{\Vert x1 \Vert_2 * \Vert x2 \Vert_2}

    Parameters:
        input1 (Tensor): tensor with shape: [N, M] or [M], 'N' means batch size, 'M' means the length of input array.
                         Available dtypes are float32, float64.
        input2 (Tensor): tensor with shape: [N, M] or [M], 'N' means batch size, 'M' means the length of input array.
                         Available dtypes are float32, float64.
        label (Tensor): tensor with shape: [N] or [1]. The target labels values should be -1 or 1.
                         Available dtypes are int32, int64, float32, float64.
        margin (float, optional): Should be a number from :math:`-1` to :math:`1`,
                         :math:`0` to :math:`0.5` is suggested. If :attr:`margin` is missing, the
                         default value is :math:`0`.
        reduction (string, optional): Specifies the reduction to apply to the output:
                         ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
                         ``'mean'``: the sum of the output will be divided by the number of elements in the output
                         ``'sum'``: the output will be summed.
        name (str, optional): Name for the operation (optional, default is None).
                         For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the cosine embedding Loss of Tensor ``input1`` ``input2`` and ``label``.
            If `reduction` is ``'none'``, the shape of output loss is [N], the same as ``input`` .
            If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].

    Examples:
        .. code-block:: python

            import paddle

            input1 = paddle.to_tensor([[1.6, 1.2, -0.5], [3.2, 2.6, -5.8]], 'float32')
            input2 = paddle.to_tensor([[0.5, 0.5, -1.8], [2.3, -1.4, 1.1]], 'float32')
            label = paddle.to_tensor([1, -1], 'int64')

            output = paddle.nn.functional.cosine_embedding_loss(input1, input2, label, margin=0.5, reduction='mean')
            print(output)  # [0.21155193]

            output = paddle.nn.functional.cosine_embedding_loss(input1, input2, label, margin=0.5, reduction='sum')
            print(output)  # [0.42310387]

            output = paddle.nn.functional.cosine_embedding_loss(input1, input2, label, margin=0.5, reduction='none')
            print(output)  # [0.42310387, 0.        ]

    """
    if len(label.shape) != 1:
        raise ValueError(
            "1D target tensor expected, multi-target not supported")

    if input1.shape != input2.shape:
        raise ValueError(
            "the shape of input tensor 1 should be equal to input tensor 2, but found inputs with "
            "different sizes")

    if len(input1.shape) > 2:
        raise ValueError(
            "1D target tensor expects 1D or 2D input tensors, but found inputs with different sizes"
        )

    if input1.dtype not in [paddle.float32, paddle.float64]:
        raise ValueError(
            "The data type of input Variable must be 'float32' or 'float64'")
    if label.dtype not in [
            paddle.int32, paddle.int64, paddle.float32, paddle.float64
    ]:
        raise ValueError(
            "The data type of label Variable must be 'int32', 'int64', 'float32', 'float64'"
        )

    prod_sum = (input1 * input2).sum(axis=-1)
    mag_square1 = paddle.square(input1).sum(axis=-1) + 10e-12
    mag_square2 = paddle.square(input2).sum(axis=-1) + 10e-12
    denom = paddle.sqrt(mag_square1 * mag_square2)
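    # cos is the cosine similarity along the last axis; the 10e-12 terms above
    # keep the denominator away from zero for all-zero rows.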
    cos = prod_sum / denom
    zeros = paddle.zeros_like(cos)
    pos = 1 - cos
    neg = paddle.clip(cos - margin, min=0)
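    # pos applies where label == 1 (penalize low similarity); neg applies where
    # label == -1 (penalize similarity above the margin).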
    out_pos = paddle.where(label == 1, pos, zeros)
    out_neg = paddle.where(label == -1, neg, zeros)
    out = out_pos + out_neg

    if reduction == 'none':
        return out
    if reduction == 'mean':
        return paddle.mean(out, name=name)
    elif reduction == 'sum':
        return paddle.sum(out, name=name)


def triplet_margin_with_distance_loss(input,
                                      positive,
                                      negative,
                                      distance_function=None,
                                      margin=1.0,
                                      swap=False,
                                      reduction='mean',
                                      name=None):
    r"""
    Measures the triplet loss given input
    tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
    This is used for measuring a relative similarity between samples. A triplet
    is composed of `input`, `positive` and `negative` (i.e., `input`, `positive examples` and `negative
    examples` respectively). The shapes of all input tensors should be
    :math:`(N, D)`.

    The loss function for each sample in the mini-batch is:

    .. math::
        L(input, pos, neg) = \max \{d(input_i, pos_i) - d(input_i, neg_i) + {\rm margin}, 0\}


    where the default distance function

    .. math::
        d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p

    or users can define their own distance functions. `margin` is a nonnegative margin representing the minimum difference
    between the positive and negative distances that is required for the loss to be 0. If `swap` is true, it will compare distance of (input, negative) with
    distance of (negative, positive) and change it to the smaller one. For more details see http://www.bmva.org/bmvc/2016/papers/paper119/paper119.pdf.

    Parameters:

        input (Tensor):Input tensor, the data type is float32 or float64.
            the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.

        positive (Tensor):Positive tensor, the data type is float32 or float64.
            The shape of label is the same as the shape of input.

        negative (Tensor):Negative tensor, the data type is float32 or float64.
            The shape of label is the same as the shape of input.

        distance_function (callable, optional): Quantifies the distance between two tensors. if not specified, 2 norm functions will be used.

        margin (float, optional): A nonnegative margin representing the minimum difference
            between the positive and negative distances required for the loss to be 0. Default value is :math:`1`.

        swap (bool, optional): The distance swap changes the negative distance to the swap distance (distance between positive samples
                and negative samples) if the swap distance is smaller than the negative distance. Default: ``False``.

        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default: ``'mean'``
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Output: Tensor. The tensor variable storing the triplet_margin_with_distance_loss of input and positive and negative.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input = paddle.to_tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]], dtype=paddle.float32)
            positive= paddle.to_tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]], dtype=paddle.float32)
            negative = paddle.to_tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]], dtype=paddle.float32)
            loss = F.triplet_margin_with_distance_loss(input, positive, negative, margin=1.0, reduction='none')
            print(loss)
            # Tensor([0.        , 0.57496738, 0.        ])


            loss = F.triplet_margin_with_distance_loss(input, positive, negative, margin=1.0, reduction='mean')
            print(loss)
            # Tensor([0.19165580])
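
            # A user-defined distance can also be passed. This is only a sketch:
            # any callable mapping two [N, D] tensors to per-sample distances
            # works here; an L1 distance is used for illustration, and the
            # resulting loss values depend on the chosen distance.
            def l1_distance(x, y):
                return paddle.abs(x - y).sum(axis=-1)

            loss = F.triplet_margin_with_distance_loss(input, positive, negative,
                                                       distance_function=l1_distance,
                                                       reduction='mean')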

    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError("'reduction' in 'triplet_margin_with_distance_loss' "
                         "should be 'sum', 'mean' or 'none', "
                         "but received {}.".format(reduction))
    if margin < 0:
        raise ValueError(
            "The margin between positive samples and negative samples should be greater than 0."
        )
    if not _non_static_mode():
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'triplet_margin_with_distance_loss')
        check_variable_and_dtype(positive, 'positive', ['float32', 'float64'],
                                 'triplet_margin_with_distance_loss')
        check_variable_and_dtype(negative, 'negative', ['float32', 'float64'],
                                 'triplet_margin_with_distance_loss')

    if not (input.shape == positive.shape == negative.shape):
        raise ValueError("input's shape must equal to "
                         "positive's shape and  "
                         "negative's shape")

    distance_function = distance_function if distance_function is not None \
        else paddle.nn.PairwiseDistance(2)
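    # PairwiseDistance(2) above is the Euclidean distance, used when no custom
    # distance_function is given.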

    positive_dist = distance_function(input, positive)
    negative_dist = distance_function(input, negative)

    if swap:
        swap_dist = distance_function(positive, negative)
        negative_dist = paddle.minimum(negative_dist, swap_dist)
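        # With swap enabled, the smaller of d(input, negative) and
        # d(positive, negative) is used as the negative distance.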

    if not paddle.all(positive_dist > 0) or not paddle.all(negative_dist > 0):
        raise ValueError(
            "The positive distance or negative distance should be greater than 0, "
            "The distance functions should be checked.")

    loss = paddle.clip(positive_dist - negative_dist + margin, min=0.0)

    if reduction == 'mean':
        return paddle.mean(loss, name=name)
    elif reduction == 'sum':
        return paddle.sum(loss, name=name)
    elif reduction == 'none':
        return loss


def triplet_margin_loss(input,
                        positive,
                        negative,
                        margin=1.0,
                        p=2,
                        epsilon=1e-6,
                        swap=False,
                        reduction='mean',
                        name=None):
    r"""
        Measures the triplet loss given input
        tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
        This is used for measuring a relative similarity between samples. A triplet
        is composed of `input`, `positive` and `negative` (i.e., `input`, `positive examples` and `negative
        examples` respectively). The shapes of all input tensors should be
        :math:`(N, *)`.

        The loss function for each sample in the mini-batch is:

        .. math::
            L(input, pos, neg) = \max \{d(input_i, pos_i) - d(input_i, neg_i) + {\rm margin}, 0\}


        where

        .. math::
            d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p

    Parameters:
        input (Tensor): Input tensor, the data type is float32 or float64.
            the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.

        positive (Tensor): Positive tensor, the data type is float32 or float64.
            The shape of label is the same as the shape of input.

        negative (Tensor): Negative tensor, the data type is float32 or float64.
            The shape of label is the same as the shape of input.

        margin (float, Optional): Default: :math:`1`.

        p (int, Optional): The norm degree for pairwise distance. Default: :math:`2`.

        epsilon (float, Optional): Add small value to avoid division by zero,
            default value is 1e-6.

        swap (bool, Optional): The distance swap changes the negative distance to the distance between
            positive sample and negative sample. For more details, see `Learning shallow convolutional feature descriptors with triplet losses`.
            Default: ``False``.


        reduction (str, Optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default: ``'mean'``

        name (str, Optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Output: Tensor. The tensor storing the triplet_margin_loss of input, positive and negative.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input = paddle.to_tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]], dtype=paddle.float32)
            positive= paddle.to_tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]], dtype=paddle.float32)
            negative = paddle.to_tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]], dtype=paddle.float32)
            loss = F.triplet_margin_loss(input, positive, negative, margin=1.0, reduction='none')
            print(loss)
            # Tensor([0.        , 0.57496738, 0.        ])


            loss = F.triplet_margin_loss(input, positive, negative, margin=1.0, reduction='mean')
            print(loss)
            # Tensor([0.19165580])
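
            # A sketch of the distance swap described above (swap=True): the negative
            # distance becomes min(d(input, negative), d(positive, negative)), so the
            # per-sample loss grows when positive and negative lie close to each other.
            # Hand-computing the formula on the tensors above gives roughly
            # [0.91, 1.32, 4.97] (approximate values, not framework output).
            loss = F.triplet_margin_loss(input, positive, negative, margin=1.0,
                                         swap=True, reduction='none')
            print(loss)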

    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "'reduction' in 'triplet_margin_loss' should be 'sum', 'mean' or 'none', "
            "but received {}.".format(reduction))
    if margin < 0:
        raise ValueError(
            "The margin between positive samples and negative samples should not be negative."
        )
    if not _non_static_mode():
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'triplet_margin_loss')
        check_variable_and_dtype(positive, 'positive', ['float32', 'float64'],
                                 'triplet_margin_loss')
        check_variable_and_dtype(negative, 'negative', ['float32', 'float64'],
                                 'triplet_margin_loss')

    if not (input.shape == positive.shape == negative.shape):
        raise ValueError("input's shape must equal to "
                         "positive's shape and  "
                         "negative's shape")

    distance_function = paddle.nn.PairwiseDistance(p, epsilon=epsilon)
    positive_dist = distance_function(input, positive)
    negative_dist = distance_function(input, negative)

    if swap:
        swap_dist = distance_function(positive, negative)
        negative_dist = paddle.minimum(negative_dist, swap_dist)

    loss = paddle.clip(positive_dist - negative_dist + margin, min=0.0)

    if reduction == 'mean':
        return paddle.mean(loss, name=name)
    elif reduction == 'sum':
        return paddle.sum(loss, name=name)
    elif reduction == 'none':
        return loss


def soft_margin_loss(input, label, reduction='mean', name=None):
    """
    The API measures the soft margin loss between input predictions ``input``
    and target labels ``label``. It can be described as:

    .. math::
        Out = log(1 + exp((-label * input)))

    Parameters:

        input (Tensor): The input predictions tensor with shape: [N, *],
            N is batch_size, `*` means any number of additional dimensions. The ``input`` ranges from -inf to inf.
            Available dtype is float32, float64.

        label (Tensor): The target labels tensor with the same shape as
            ``input``. The target label values should be -1 or 1.
            Available dtype is int32, int64, float32, float64.

        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default is ``'mean'``.

        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:

        Output (Tensor): If ``reduction`` is ``'none'``, the shape of output is
            the same as ``input``, else the shape of output is [1].

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
            label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
            output = paddle.nn.functional.soft_margin_loss(input, label)
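            # With the default 'mean' reduction, hand-applying log(1 + exp(-label * input))
            # to the six entries above gives a scalar of roughly 0.640
            # (approximate value computed from the formula, not framework output).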

            input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
            label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
            label_np[label_np==0]=-1
            input = paddle.to_tensor(input_np)
            label = paddle.to_tensor(label_np)
            output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
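            # With reduction='none', the output keeps the shape of the input, here [5, 5].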
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in soft_margin_loss should be 'sum', "
            "'mean' or 'none', but received %s, which is not allowed." %
            reduction)

    if not _non_static_mode():
        fluid.data_feeder.check_variable_and_dtype(input, 'input',
                                                   ['float32', 'float64'],
                                                   'soft_margin_loss')
        fluid.data_feeder.check_variable_and_dtype(
            label, 'label', ['int32', 'int64', 'float32', 'float64'],
            'soft_margin_loss')

    if not (input.shape == label.shape):
        raise ValueError("input's shape must equal to "
                         "label's shape")

    label = fluid.layers.cast(label, input.dtype)
    out = paddle.log(1 + paddle.exp(-label * input))

    if reduction == 'sum':
        return paddle.sum(out, name=name)
    elif reduction == 'mean':
        return paddle.mean(out, name=name)
    else:
        return out