Unverified commit 00bf84c4 authored by Walter, committed by GitHub

Merge pull request #1886 from HydrogenSulfate/refine_ref

Refine paper & code ref for loss
......@@ -20,6 +20,7 @@ class DSHSDLoss(nn.Layer):
"""
# DSHSD(IEEE ACCESS 2019)
# paper [Deep Supervised Hashing Based on Stable Distribution](https://ieeexplore.ieee.org/document/8648432/)
# code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/DSHSD.py
"""
def __init__(self, alpha, multi_label=False):
......@@ -62,6 +63,7 @@ class DSHSDLoss(nn.Layer):
class LCDSHLoss(nn.Layer):
"""
# paper [Locality-Constrained Deep Supervised Hashing for Image Retrieval](https://www.ijcai.org/Proceedings/2017/0499.pdf)
# code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/LCDSH.py
"""
def __init__(self, n_class, _lambda):
......@@ -100,6 +102,7 @@ class DCHLoss(paddle.nn.Layer):
"""
# paper [Deep Cauchy Hashing for Hamming Space Retrieval](http://ise.thss.tsinghua.edu.cn/~mlong/doc/deep-cauchy-hashing-cvpr18.pdf)
# code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/DCH.py
"""
def __init__(self, gamma, _lambda, n_class):
......
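The three hashing losses above (DSHSD, LCDSH, DCH) share a broadly similar pairwise recipe: class labels are turned into a pairwise similarity matrix, a distance between relaxed binary codes is penalized according to that similarity, and a quantization term pushes the codes toward ±1. The sketch below is a generic DSH-style illustration of that pattern, hypothetical and not the exact formula of any of the three classes.
import paddle
import paddle.nn.functional as F

def pairwise_hashing_loss(codes, labels, alpha=0.1):
    # codes: [N, bits] real-valued network outputs; labels: [N] class ids
    n = labels.shape[0]
    lbl = labels.reshape([n, 1]).expand([n, n])
    sim = lbl.equal(paddle.t(lbl)).astype('float32')           # 1 for same-class pairs
    diffs = paddle.unsqueeze(codes, axis=1) - paddle.unsqueeze(codes, axis=0)
    dist = paddle.sum(paddle.square(diffs), axis=-1)            # squared Euclidean distance
    bits = codes.shape[1]
    # pull similar pairs together, push dissimilar pairs beyond a 2*bits margin
    pair_loss = sim * dist + (1.0 - sim) * F.relu(2.0 * bits - dist)
    # quantization term: encourage each code entry to saturate at -1 / +1
    quant_loss = paddle.mean(paddle.abs(paddle.abs(codes) - 1.0))
    return paddle.mean(pair_loss) + alpha * quant_loss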
......@@ -23,6 +23,11 @@ from .comfunc import rerange_index
class EmlLoss(paddle.nn.Layer):
"""Ensemble Metric Learning Loss
paper: [Large Scale Strongly Supervised Ensemble Metric Learning, with Applications to Face Verification and Retrieval](https://arxiv.org/pdf/1212.6094.pdf)
code reference: https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/metric_learning/losses/emlloss.py
"""
def __init__(self, batch_size=40, samples_each_class=2):
super(EmlLoss, self).__init__()
assert (batch_size % samples_each_class == 0)
......
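The assertion above encodes the P * K sampling scheme shared by the metric-learning losses in this PR: each batch holds batch_size / samples_each_class identities, with samples_each_class images per identity gathered together. A tiny, hypothetical illustration of the resulting label layout with the default arguments:
# defaults: batch_size=40, samples_each_class=2  ->  20 identities per batch
P, K = 40 // 2, 2
labels = [c for c in range(P) for _ in range(K)]   # [0, 0, 1, 1, ..., 19, 19]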
......@@ -18,11 +18,13 @@ import paddle.nn.functional as F
class GoogLeNetLoss(nn.Layer):
"""
Cross entropy loss used after googlenet
reference paper: [Going Deeper with Convolutions](https://arxiv.org/pdf/1409.4842v1.pdf)
"""
def __init__(self, epsilon=None):
super().__init__()
assert (epsilon is None or epsilon <= 0 or
        epsilon >= 1), "googlenet does not support label_smooth"
def forward(self, inputs, label):
input0, input1, input2 = inputs
......
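A minimal sketch of how the truncated forward presumably combines the three GoogLeNet heads, assuming the 0.3 auxiliary-classifier weight from the paper; the exact weighting and return format in this repository may differ.
import paddle.nn.functional as F

def googlenet_loss(inputs, label, aux_weight=0.3):
    # inputs: (main_logits, aux1_logits, aux2_logits), as unpacked in forward()
    input0, input1, input2 = inputs
    loss0 = F.cross_entropy(input0, label)   # main classifier
    loss1 = F.cross_entropy(input1, label)   # auxiliary classifier 1
    loss2 = F.cross_entropy(input2, label)   # auxiliary classifier 2
    return loss0 + aux_weight * (loss1 + loss2)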
......@@ -21,10 +21,12 @@ from .comfunc import rerange_index
class MSMLoss(paddle.nn.Layer):
"""
paper : [Margin Sample Mining Loss: A Deep Learning Based Method for Person Re-identification](https://arxiv.org/pdf/1710.00478.pdf)
code reference: https://github.com/michuanhaohao/keras_reid/blob/master/reid_tripletcls.py
Margin Sample Mining Loss, based on triplet loss. Uses P * K samples.
The batch size is fixed: batch_size = P * K, but K may vary between batches.
Samples with the same label are gathered together.
supported_metrics = [
'euclidean',
'sqeuclidean',
......@@ -41,7 +43,7 @@ class MSMLoss(paddle.nn.Layer):
self.rerange_index = rerange_index(batch_size, samples_each_class)
def forward(self, input, target=None):
# normalization
features = input["features"]
features = self._nomalize(features)
samples_each_class = self.samples_each_class
......@@ -53,7 +55,7 @@ class MSMLoss(paddle.nn.Layer):
features, axis=0)
similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)
# rerange
tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
tmp = paddle.gather(tmp, index=rerange_index)
similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])
......
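The truncated forward above builds a dense pairwise squared-Euclidean matrix before the rerange step. A standalone sketch of that computation on the (already normalized) features, mirroring the unsqueeze/square/sum pattern in the hunk:
import paddle

def pairwise_sq_distances(features):
    # features: [N, D], assumed L2-normalized as in _nomalize()
    diffs = paddle.unsqueeze(features, axis=1) - paddle.unsqueeze(features, axis=0)
    return paddle.sum(paddle.square(diffs), axis=-1)   # [N, N] squared distances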
......@@ -5,6 +5,11 @@ import paddle
class NpairsLoss(paddle.nn.Layer):
"""Npair_loss_
paper [Improved deep metric learning with multi-class N-pair loss objective](https://dl.acm.org/doi/10.5555/3157096.3157304)
code reference: https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/contrib/losses/metric_learning/npairs_loss
"""
def __init__(self, reg_lambda=0.01):
super(NpairsLoss, self).__init__()
self.reg_lambda = reg_lambda
......
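A minimal sketch of the referenced npairs_loss formulation, assuming the batch is split into anchor/positive halves with one pair per class and that reg_lambda scales an L2 penalty on the embeddings; the signature is hypothetical and is not this class's forward:
import paddle
import paddle.nn.functional as F

def npairs_loss(anchors, positives, reg_lambda=0.01):
    # anchors, positives: [N, D]; row i of each comes from the same class
    logits = paddle.matmul(anchors, positives, transpose_y=True)   # [N, N] similarity logits
    target = paddle.arange(logits.shape[0])                        # matching positive index
    ce = F.cross_entropy(logits, target)
    # L2 regularization on the un-normalized embeddings
    reg = paddle.mean(paddle.sum(paddle.square(anchors), axis=1)) + \
          paddle.mean(paddle.sum(paddle.square(positives), axis=1))
    return ce + reg_lambda * reg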
......@@ -23,6 +23,11 @@ import paddle.nn.functional as F
class PairwiseCosface(nn.Layer):
"""
paper: Circle Loss: A Unified Perspective of Pair Similarity Optimization
code reference: https://github.com/leoluopy/circle-loss-demonstration/blob/main/circle_loss.py
"""
def __init__(self, margin, gamma):
super(PairwiseCosface, self).__init__()
self.margin = margin
......@@ -36,8 +41,10 @@ class PairwiseCosface(nn.Layer):
dist_mat = paddle.matmul(embedding, embedding, transpose_y=True)
N = dist_mat.shape[0]
is_pos = targets.reshape([N, 1]).expand([N, N]).equal(
    paddle.t(targets.reshape([N, 1]).expand([N, N]))).astype('float')
is_neg = targets.reshape([N, 1]).expand([N, N]).not_equal(
    paddle.t(targets.reshape([N, 1]).expand([N, N]))).astype('float')
# Mask scores related to itself
is_pos = is_pos - paddle.eye(N, N)
......@@ -46,10 +53,12 @@ class PairwiseCosface(nn.Layer):
s_n = dist_mat * is_neg
logit_p = -self.gamma * s_p + (-99999999.) * (1 - is_pos)
logit_n = self.gamma * (s_n + self.margin) + (-99999999.) * (1 - is_neg)
loss = F.softplus(
    paddle.logsumexp(logit_p, axis=1) +
    paddle.logsumexp(logit_n, axis=1)).mean()
return {"PairwiseCosface": loss}
......@@ -29,6 +29,7 @@ def pdist(e, squared=False, eps=1e-12):
class RKdAngle(nn.Layer):
# paper : [Relational Knowledge Distillation](https://arxiv.org/abs/1904.05068?context=cs.LG)
# reference: https://github.com/lenscloth/RKD/blob/master/metric/loss.py
def __init__(self, target_size=None):
super().__init__()
......@@ -64,6 +65,7 @@ class RKdAngle(nn.Layer):
class RkdDistance(nn.Layer):
# paper : [Relational Knowledge Distillation](https://arxiv.org/abs/1904.05068?context=cs.LG)
# reference: https://github.com/lenscloth/RKD/blob/master/metric/loss.py
def __init__(self, eps=1e-12, target_size=1):
super().__init__()
......
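A minimal sketch of the distance-wise RKD term from the referenced paper and code, assuming pdist is the pairwise Euclidean distance helper defined just above these classes; the class itself may differ in normalization details.
import paddle
import paddle.nn.functional as F

def rkd_distance(student, teacher, eps=1e-12):
    # relational KD: match the normalized pairwise distance structure of the teacher
    with paddle.no_grad():
        t_d = pdist(teacher, squared=False, eps=eps)
        t_d = t_d / (paddle.mean(paddle.masked_select(t_d, t_d > 0)) + eps)
    s_d = pdist(student, squared=False, eps=eps)
    s_d = s_d / (paddle.mean(paddle.masked_select(s_d, s_d > 0)) + eps)
    return F.smooth_l1_loss(s_d, t_d)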
......@@ -4,6 +4,7 @@ from paddle import nn
class SupConLoss(nn.Layer):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
code reference: https://github.com/HobbitLong/SupContrast/blob/master/losses.py
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self,
......
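A compact sketch of the supervised contrastive objective from the referenced paper, assuming a single view per sample, L2-normalized features, and a temperature parameter; the class above additionally handles multiple views, the unsupervised SimCLR case, and logit stabilization.
import paddle

def supcon_loss(features, labels, temperature=0.07):
    # features: [N, D] L2-normalized embeddings; labels: [N] class ids
    n = labels.shape[0]
    lbl = labels.reshape([n, 1]).expand([n, n])
    pos_mask = lbl.equal(paddle.t(lbl)).astype('float32')
    logits = paddle.matmul(features, features, transpose_y=True) / temperature
    # drop self-comparisons from both the positives and the denominator
    not_self = 1.0 - paddle.eye(n)
    pos_mask = pos_mask * not_self
    exp_logits = paddle.exp(logits) * not_self
    log_prob = logits - paddle.log(paddle.sum(exp_logits, axis=1, keepdim=True))
    # average log-likelihood over each anchor's positives (clip guards singleton classes)
    mean_log_prob_pos = paddle.sum(pos_mask * log_prob, axis=1) / paddle.clip(
        paddle.sum(pos_mask, axis=1), min=1.0)
    return -paddle.mean(mean_log_prob_pos)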
......@@ -22,10 +22,12 @@ from .comfunc import rerange_index
class TriHardLoss(paddle.nn.Layer):
"""
paper: In Defense of the Triplet Loss for Person Re-Identification
code reference: https://github.com/VisualComputingInstitute/triplet-reid/blob/master/loss.py
TriHard Loss, based on triplet loss. Uses P * K samples.
The batch size is fixed: batch_size = P * K, but K may vary between batches.
Samples with the same label are gathered together.
supported_metrics = [
'euclidean',
'sqeuclidean',
......@@ -45,7 +47,7 @@ class TriHardLoss(paddle.nn.Layer):
features = input["features"]
assert (self.batch_size == features.shape[0])
# normalization
features = self._nomalize(features)
samples_each_class = self.samples_each_class
rerange_index = paddle.to_tensor(self.rerange_index)
......@@ -56,7 +58,7 @@ class TriHardLoss(paddle.nn.Layer):
features, axis=0)
similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)
# rerange
tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
tmp = paddle.gather(tmp, index=rerange_index)
similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
......@@ -8,6 +22,8 @@ import paddle.nn as nn
class TripletLossV2(nn.Layer):
"""Triplet loss with hard positive/negative mining.
paper : [Facenet: A unified embedding for face recognition and clustering](https://arxiv.org/pdf/1503.03832.pdf)
code reference: https://github.com/okzhili/Cartoon-face-recognition/blob/master/loss/triplet_loss.py
Args:
margin (float): margin for triplet.
"""
......
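A minimal sketch of the hard positive/negative mining described above, assuming a plain Euclidean distance matrix and a margin ranking formulation; the class may additionally normalize features and differs in implementation details.
import paddle
import paddle.nn.functional as F

def hard_mining_triplet(features, labels, margin=0.5):
    # pairwise Euclidean distances
    diffs = paddle.unsqueeze(features, axis=1) - paddle.unsqueeze(features, axis=0)
    dist = paddle.sqrt(paddle.sum(paddle.square(diffs), axis=-1) + 1e-12)
    n = labels.shape[0]
    lbl = labels.reshape([n, 1]).expand([n, n])
    is_pos = lbl.equal(paddle.t(lbl)).astype('float32')
    # hardest positive: farthest sample with the same label
    dist_ap = paddle.max(dist * is_pos, axis=1)
    # hardest negative: closest sample with a different label
    dist_an = paddle.min(dist + is_pos * 1e12, axis=1)
    # want dist_an > dist_ap + margin for every anchor
    y = paddle.ones_like(dist_an)
    return F.margin_ranking_loss(dist_an, dist_ap, y, margin=margin)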