#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
import numpy as np
from .comfunc import rerange_index
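# NOTE: rerange_index (defined in comfunc.py) is expected to return a
# permutation of the flattened [batch_size, batch_size] distance matrix that
# reorders each row as [self, same-class..., different-class...], matching
# the split performed in forward() below.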


class EmlLoss(paddle.nn.Layer):
    """Ensemble Metric Learning Loss
    paper: [Large Scale Strongly Supervised Ensemble Metric Learning, with Applications to Face Verification and Retrieval](https://arxiv.org/pdf/1212.6094.pdf)
    code reference: https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/metric_learning/losses/emlloss.py
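
    Args:
        batch_size (int): total number of samples in a batch; must be
            divisible by samples_each_class.
        samples_each_class (int): number of consecutive same-class samples
            in the batch (assumed to match the batch sampler's layout).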
    """

    def __init__(self, batch_size=40, samples_each_class=2):
        super(EmlLoss, self).__init__()
        assert batch_size % samples_each_class == 0, \
            "batch_size must be divisible by samples_each_class"
        self.samples_each_class = samples_each_class
        self.batch_size = batch_size
        self.rerange_index = rerange_index(batch_size, samples_each_class)
        self.thresh = 20.0  # gap above which the overflow-safe branch is used
        self.beta = 100000  # beta of the log(1 + beta * x) surrogate

    def surrogate_function(self, beta, theta, bias):
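        # smoothed surrogate loss:
        # f(theta, bias) = log(1 + beta * theta * exp(bias)) / log(1 + beta)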
        x = theta * paddle.exp(bias)
        output = paddle.log(1 + beta * x) / math.log(1 + beta)
        return output

    def surrogate_function_approximate(self, beta, theta, bias):
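        # for large bias, 1 + beta * theta * exp(bias) ~= beta * theta * exp(bias),
        # so f ~= (log(theta) + bias + log(beta)) / log(1 + beta); this avoids
        # computing exp(bias) and therefore cannot overflow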
        output = (
            paddle.log(theta) + bias + math.log(beta)) / math.log(1 + beta)
        return output

    def surrogate_function_stable(self, beta, theta, target, thresh):
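        # piecewise evaluation: the exact surrogate is used below `thresh` and
        # the overflow-safe approximation above it; subtracting the surrogate
        # evaluated at `thresh` keeps the two branches consistent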
        max_gap = paddle.to_tensor(thresh, dtype='float32')
        max_gap.stop_gradient = True

        target_max = paddle.maximum(target, max_gap)
        target_min = paddle.minimum(target, max_gap)

        loss1 = self.surrogate_function(beta, theta, target_min)
        loss2 = self.surrogate_function_approximate(beta, theta, target_max)
        bias = self.surrogate_function(beta, theta, max_gap)
        loss = loss1 + loss2 - bias
        return loss

    def forward(self, input, target=None):
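        # input["features"] is expected to be a [batch_size, feat_dim] tensor
        # whose rows are grouped so that every `samples_each_class`
        # consecutive samples share a label (the layout rerange_index assumes)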
        features = input["features"]
        samples_each_class = self.samples_each_class
        batch_size = self.batch_size
        rerange_index = self.rerange_index

        # pairwise squared Euclidean distances, shape [batch_size, batch_size]
        diffs = paddle.unsqueeze(
            features, axis=1) - paddle.unsqueeze(
                features, axis=0)
        similarity_matrix = paddle.sum(paddle.square(diffs), axis=-1)

        # flatten, then reorder so each row becomes
        # [self, positives..., negatives...]
        tmp = paddle.reshape(similarity_matrix, shape=[-1, 1])
        rerange_index = paddle.to_tensor(rerange_index)
        tmp = paddle.gather(tmp, index=rerange_index)
        similarity_matrix = paddle.reshape(tmp, shape=[-1, batch_size])

        # column 0 is each sample's distance to itself; the next
        # samples_each_class - 1 columns are same-class (positive) pairs,
        # the remaining columns different-class (negative) pairs
        ignore, pos, neg = paddle.split(
            similarity_matrix,
            num_or_sections=[
                1, samples_each_class - 1, batch_size - samples_each_class
            ],
            axis=1)
        ignore.stop_gradient = True

        # softened max over positive distances, shifted by the row max
        # before exponentiating for numerical stability
        pos_max = paddle.max(pos, axis=1, keepdim=True)
        pos = paddle.exp(pos - pos_max)
        pos_mean = paddle.mean(pos, axis=1, keepdim=True)

        # softened min over negative distances, shifted by the row min
        neg_min = paddle.min(neg, axis=1, keepdim=True)
        neg = paddle.exp(neg_min - neg)
        neg_mean = paddle.mean(neg, axis=1, keepdim=True)

        # theta * exp(bias) equals the mean over (i, j) of exp(pos_i - neg_j),
        # the softened positive-minus-negative margin fed to the surrogate
        bias = pos_max - neg_min
        theta = paddle.multiply(neg_mean, pos_mean)

        # per-row surrogate loss, averaged over the batch
        loss = self.surrogate_function_stable(self.beta, theta, bias,
                                              self.thresh)
        loss = paddle.mean(loss)
        return {"emlloss": loss}
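

# A minimal usage sketch (not part of the original file), assuming the default
# sampler layout: every `samples_each_class` consecutive rows of the batch
# share a class. Because of the relative import above, run this file as a
# module (python -m ...) rather than as a script.
if __name__ == "__main__":
    loss_fn = EmlLoss(batch_size=40, samples_each_class=2)
    feats = paddle.randn([40, 128], dtype='float32')
    out = loss_fn({"features": feats})
    print(out["emlloss"])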