circlemargin.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class CircleMargin(nn.Layer):
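    """Circle margin classification head.

    Applies the Circle Loss margin (Sun et al., "Circle Loss: A Unified
    Perspective of Pair Similarity Optimization", CVPR 2020) to the cosine
    logits of a fully connected layer, so a plain softmax cross-entropy
    loss can be trained on the returned logits.

    Args:
        embedding_size (int): dimension of the input feature vectors.
        class_num (int): number of classes (output dimension).
        margin (float): relaxation margin m.
        scale (float): scale factor applied to the adjusted logits.
    """
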
    def __init__(self, embedding_size, class_num, margin, scale):
        super(CircleMargin, self).__init__()
        self.scale = scale
        self.margin = margin
        self.embedding_size = embedding_size
        self.class_num = class_num

        # FC layer whose weight columns act as class prototypes;
        # Xavier-normal initialization.
        weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.XavierNormal())
        self.fc = paddle.nn.Linear(
            self.embedding_size, self.class_num, weight_attr=weight_attr)

    def forward(self, input, label):
        # L2-normalize the input features along the embedding axis.
        feat_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, feat_norm)

        # Classifier weights, L2-normalized per class (per column) below.
        weight = self.fc.weight
        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(weight), axis=0, keepdim=True))
        weight = paddle.divide(weight, weight_norm)

        # Cosine similarity between each feature and each class weight vector.
        logits = paddle.matmul(input, weight)
        # At inference time (or when no labels are given), return the raw
        # cosine logits without the margin adjustment.
        if not self.training or label is None:
            return logits

        # Circle Loss re-weighting: alpha_p = [1 + m - s_p]_+ and
        # alpha_n = [s_n + m]_+, with decision margins delta_p = 1 - m
        # and delta_n = m (m = self.margin).
        alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.)
        alpha_n = paddle.clip(logits.detach() + self.margin, min=0.)
        delta_p = 1 - self.margin
        delta_n = self.margin
        
        # One-hot mask marking the ground-truth class of each sample.
        m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1])
        
        # Positive branch for the target class, negative branch for all
        # other classes; combine with the one-hot mask and scale.
        logits_p = alpha_p * (logits - delta_p)
        logits_n = alpha_n * (logits - delta_n)
        pre_logits = logits_p * m_hot + logits_n * (1 - m_hot)
        pre_logits = self.scale * pre_logits

        return pre_logits
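

# Minimal usage sketch (not part of the original file). The batch size,
# feature dimension, and the margin/scale values below are illustrative
# assumptions, not recommended settings.
if __name__ == "__main__":
    batch_size, embedding_size, class_num = 8, 128, 10
    head = CircleMargin(
        embedding_size=embedding_size,
        class_num=class_num,
        margin=0.35,
        scale=64)
    feats = paddle.randn([batch_size, embedding_size])
    labels = paddle.randint(0, class_num, shape=[batch_size])
    # A freshly built Layer is in training mode, so the margin branch runs.
    margin_logits = head(feats, labels)
    loss = F.cross_entropy(margin_logits, labels)
    print(margin_logits.shape, float(loss))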