diff --git a/python/paddle/fluid/tests/unittests/test_soft_margin_loss.py b/python/paddle/fluid/tests/unittests/test_soft_margin_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf61c556e72b5b78d263997fcaa3c1bea3e704c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_soft_margin_loss.py @@ -0,0 +1,177 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import numpy as np +import unittest + + +def test_static_layer( + place, + input_np, + label_np, + reduction='mean', +): + paddle.enable_static() + prog = paddle.static.Program() + startup_prog = paddle.static.Program() + with paddle.static.program_guard(prog, startup_prog): + input = paddle.static.data(name='input', + shape=input_np.shape, + dtype=input_np.dtype) + label = paddle.static.data(name='label', + shape=label_np.shape, + dtype=label_np.dtype) + sm_loss = paddle.nn.loss.SoftMarginLoss(reduction=reduction) + res = sm_loss(input, label) + exe = paddle.static.Executor(place) + static_result = exe.run(prog, + feed={ + "input": input_np, + "label": label_np + }, + fetch_list=[res]) + return static_result + + +def test_static_functional( + place, + input_np, + label_np, + reduction='mean', +): + paddle.enable_static() + prog = paddle.static.Program() + startup_prog = paddle.static.Program() + with paddle.static.program_guard(prog, startup_prog): + input = paddle.static.data(name='input', + shape=input_np.shape, + dtype=input_np.dtype) + label = paddle.static.data(name='label', + shape=label_np.shape, + dtype=label_np.dtype) + + res = paddle.nn.functional.soft_margin_loss(input, + label, + reduction=reduction) + exe = paddle.static.Executor(place) + static_result = exe.run(prog, + feed={ + "input": input_np, + "label": label_np + }, + fetch_list=[res]) + return static_result + + +def test_dygraph_layer( + place, + input_np, + label_np, + reduction='mean', +): + paddle.disable_static() + sm_loss = paddle.nn.loss.SoftMarginLoss(reduction=reduction) + dy_res = sm_loss(paddle.to_tensor(input_np), paddle.to_tensor(label_np)) + dy_result = dy_res.numpy() + paddle.enable_static() + return dy_result + + +def test_dygraph_functional( + place, + input_np, + label_np, + reduction='mean', +): + paddle.disable_static() + input = paddle.to_tensor(input_np) + label = paddle.to_tensor(label_np) + + dy_res = paddle.nn.functional.soft_margin_loss(input, + label, + reduction=reduction) + dy_result = dy_res.numpy() + paddle.enable_static() + return dy_result + + +def calc_softmarginloss( + input_np, + label_np, + reduction='mean', +): + expected = np.log(1 + np.exp(-label_np * input_np)) + # expected = np.mean(expected, axis=-1) + + if reduction == 'mean': + expected = np.mean(expected) + elif reduction == 'sum': + expected = np.sum(expected) + else: + expected = expected + + return expected + + +class TestSoftMarginLoss(unittest.TestCase): + + def test_SoftMarginLoss(self): + input_np = np.random.uniform(0.1, 0.8, size=(5, 
5)).astype(np.float64) + types = [np.int32, np.int64, np.float32, np.float64] + places = ['cpu'] + if paddle.device.is_compiled_with_cuda(): + places.append('gpu') + reductions = ['sum', 'mean', 'none'] + for place in places: + for reduction in reductions: + for _type in types: + label_np = np.random.randint(0, 2, + size=(5, 5)).astype(_type) + label_np[label_np == 0] = -1 + static_result = test_static_layer(place, input_np, label_np, + reduction) + dy_result = test_dygraph_layer(place, input_np, label_np, + reduction) + expected = calc_softmarginloss(input_np, label_np, + reduction) + self.assertTrue(np.allclose(static_result, expected)) + self.assertTrue(np.allclose(static_result, dy_result)) + self.assertTrue(np.allclose(dy_result, expected)) + static_functional = test_static_functional( + place, input_np, label_np, reduction) + dy_functional = test_dygraph_functional( + place, input_np, label_np, reduction) + self.assertTrue(np.allclose(static_functional, expected)) + self.assertTrue( + np.allclose(static_functional, dy_functional)) + self.assertTrue(np.allclose(dy_functional, expected)) + + def test_SoftMarginLoss_error(self): + paddle.disable_static() + self.assertRaises(ValueError, + paddle.nn.loss.SoftMarginLoss, + reduction="unsupport reduction") + input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') + label = paddle.to_tensor([[-1.0, 1.0]], dtype='float32') + self.assertRaises(ValueError, + paddle.nn.functional.soft_margin_loss, + input=input, + label=label, + reduction="unsupport reduction") + paddle.enable_static() + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index a3ae38c9794325ecad511b03f72b6e4cd9615937..e47fa8c3c5480d0b33ae0fa21a04245f217638f3 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -111,6 +111,7 @@ from .layer.loss import HingeEmbeddingLoss # noqa: F401 from .layer.loss import CosineEmbeddingLoss # noqa: F401 from .layer.loss import TripletMarginWithDistanceLoss from .layer.loss import TripletMarginLoss +from .layer.loss import SoftMarginLoss from .layer.norm import BatchNorm # noqa: F401 from .layer.norm import SyncBatchNorm # noqa: F401 from .layer.norm import GroupNorm # noqa: F401 @@ -320,4 +321,5 @@ __all__ = [ # noqa 'RReLU', 'TripletMarginWithDistanceLoss', 'TripletMarginLoss', + 'SoftMarginLoss', ] diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index a9c1d24e2c6fcc60dcda879a860f6025dde0ad6e..b5d2d6f5beb9ded22d767c40c4d0bff11b760a7d 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -94,6 +94,7 @@ from .loss import cosine_embedding_loss # noqa: F401 from .loss import multi_label_soft_margin_loss from .loss import triplet_margin_with_distance_loss from .loss import triplet_margin_loss +from .loss import soft_margin_loss from .norm import batch_norm # noqa: F401 from .norm import instance_norm # noqa: F401 from .norm import layer_norm # noqa: F401 @@ -238,4 +239,5 @@ __all__ = [ # noqa 'rrelu', 'triplet_margin_with_distance_loss', 'triplet_margin_loss', + 'soft_margin_loss', ] diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 2537a9f3ae610cf33af34c7f615f03fd4a34cd37..f661d7f9dbc936007d006f959713656e71846f93 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -3200,3 +3200,82 @@ def triplet_margin_loss(input, return paddle.sum(loss, name=name) elif reduction == 'none': 
         return loss
+
+
+def soft_margin_loss(input, label, reduction='mean', name=None):
+    """
+    The API measures the soft margin loss between input predictions ``input``
+    and target labels ``label``. It can be described as:
+
+    .. math::
+        Out = log(1 + exp(-label * input))
+
+    Parameters:
+
+        input (Tensor): The input predictions tensor with shape: [N, *],
+            N is batch_size, `*` means any number of additional dimensions. The ``input`` ranges from -inf to inf.
+            Available dtype is float32, float64.
+
+        label (Tensor): The target labels tensor with the same shape as
+            ``input``. The target labels whose values should be -1 or 1.
+            Available dtype is int32, int64, float32, float64.
+
+        reduction (str, optional): Indicate how to average the loss by batch_size,
+            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
+            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
+            Default is ``'mean'``.
+
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+
+        Output (Tensor): If ``reduction`` is ``'none'``, the shape of the output is
+            the same as ``input``, else the shape of the output is [1].
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
+            label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
+            output = paddle.nn.functional.soft_margin_loss(input, label)
+
+            input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
+            label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
+            label_np[label_np==0]=-1
+            input = paddle.to_tensor(input_np)
+            label = paddle.to_tensor(label_np)
+            output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
+    """
+    if reduction not in ['sum', 'mean', 'none']:
+        raise ValueError(
+            "The value of 'reduction' in soft_margin_loss should be 'sum', "
+            "'mean' or 'none', but received %s, which is not allowed." %
+            reduction)
+
+    if not _non_static_mode():
+        fluid.data_feeder.check_variable_and_dtype(input, 'input',
+                                                   ['float32', 'float64'],
+                                                   'soft_margin_loss')
+        fluid.data_feeder.check_variable_and_dtype(
+            label, 'label', ['int32', 'int64', 'float32', 'float64'],
+            'soft_margin_loss')
+
+    if not (input.shape == label.shape):
+        raise ValueError("input's shape must be equal to "
+                         "label's shape")
+
+    label = fluid.layers.cast(label, input.dtype)
+    out = paddle.log(1 + paddle.exp(-label * input))
+
+    if reduction == 'sum':
+        return paddle.sum(out, name=name)
+    elif reduction == 'mean':
+        return paddle.mean(out, name=name)
+    else:
+        return out
diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py
index e6f6a6508488ba6040516226af32fa76cb47a4bb..45cb652332b3ad8929d453e5c6a51d31b7ee72be 100644
--- a/python/paddle/nn/layer/__init__.py
+++ b/python/paddle/nn/layer/__init__.py
@@ -82,6 +82,7 @@ from .loss import SmoothL1Loss  # noqa: F401
 from .loss import HingeEmbeddingLoss  # noqa: F401
 from .loss import TripletMarginWithDistanceLoss
 from .loss import TripletMarginLoss
+from .loss import SoftMarginLoss
 from .norm import BatchNorm1D  # noqa: F401
 from .norm import BatchNorm2D  # noqa: F401
 from .norm import BatchNorm3D  # noqa: F401
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index aeb213bb2c1f5efdbf3aba3d6b6abcd2cdcec4a1..54ef832d7317954467bed96758cfdc89dde644e7 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -1691,3 +1691,74 @@ class TripletMarginLoss(Layer):
                                      swap=self.swap,
                                      reduction=self.reduction,
                                      name=self.name)
+
+
+class SoftMarginLoss(Layer):
+    r"""
+    Creates a criterion that measures a two-class soft margin loss between input predictions ``input``
+    and target labels ``label``. It can be described as:
+
+    .. math::
+        Out = log(1 + exp(-label * input))
+
+    Parameters:
+
+        reduction (str, optional): Indicate how to average the loss by batch_size,
+            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
+            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
+            Default is ``'mean'``.
+
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Shapes:
+
+        Input (Tensor): The input tensor with shape: [N, *],
+            N is batch_size, `*` means any number of additional dimensions. The ``input`` ranges from -inf to inf.
+            Available dtype is float32, float64.
+
+        Label (Tensor): The target labels tensor with the same shape as
+            ``input``. The target labels whose values should be -1 or 1.
+            Available dtype is int32, int64, float32, float64.
+
+        Output (Tensor): If ``reduction`` is ``'none'``, the shape of the output is
+            the same as ``input``, else the shape of the output is [1].
+
+    Returns:
+        A callable object of SoftMarginLoss.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
+            label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
+            soft_margin_loss = paddle.nn.SoftMarginLoss()
+            output = soft_margin_loss(input, label)
+
+            input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
+            label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
+            label_np[label_np==0]=-1
+            input = paddle.to_tensor(input_np)
+            label = paddle.to_tensor(label_np)
+            soft_margin_loss = paddle.nn.SoftMarginLoss(reduction='none')
+            output = soft_margin_loss(input, label)
+    """
+
+    def __init__(self, reduction='mean', name=None):
+        if reduction not in ['sum', 'mean', 'none']:
+            raise ValueError(
+                "The value of 'reduction' in SoftMarginLoss should be 'sum', 'mean' or 'none', but "
+                "received %s, which is not allowed." % reduction)
+
+        super(SoftMarginLoss, self).__init__()
+        self.reduction = reduction
+        self.name = name
+
+    def forward(self, input, label):
+        out = paddle.nn.functional.soft_margin_loss(input, label,
+                                                    self.reduction, self.name)
+        return out
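A quick standalone sanity check of the new API against the docstring formula, Out = log(1 + exp(-label * input)). This sketch is not part of the patch; it assumes a Paddle build with this change applied and uses only APIs that appear in the diff above.

.. code-block:: python

    import numpy as np
    import paddle

    # Random inputs and {-1, 1} labels, mirroring the unit test setup.
    input_np = np.random.uniform(-1.0, 1.0, size=(4, 3)).astype(np.float64)
    label_np = np.random.choice([-1.0, 1.0], size=(4, 3))

    # Reference values computed directly from the formula.
    expected = np.log(1.0 + np.exp(-label_np * input_np))

    # reduction='none' should return the elementwise loss unchanged.
    out = paddle.nn.functional.soft_margin_loss(paddle.to_tensor(input_np),
                                                paddle.to_tensor(label_np),
                                                reduction='none')
    assert np.allclose(out.numpy(), expected)

    # 'mean' (the default) reduces the elementwise loss to a scalar.
    out_mean = paddle.nn.functional.soft_margin_loss(
        paddle.to_tensor(input_np), paddle.to_tensor(label_np))
    assert np.allclose(out_mean.numpy(), expected.mean())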