diff --git a/ppcls/loss/deephashloss.py b/ppcls/loss/deephashloss.py
index 959fd11ada6bf2812c2cd028926ab1e9a469d688..7dda519a871cc0460160ea4411c286ac5c25d6ad 100644
--- a/ppcls/loss/deephashloss.py
+++ b/ppcls/loss/deephashloss.py
@@ -20,6 +20,7 @@ class DSHSDLoss(nn.Layer):
     """
     # DSHSD(IEEE ACCESS 2019)
     # paper [Deep Supervised Hashing Based on Stable Distribution](https://ieeexplore.ieee.org/document/8648432/)
+    # code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/DSHSD.py
     """

     def __init__(self, alpha, multi_label=False):
@@ -62,6 +63,7 @@ class DSHSDLoss(nn.Layer):
 class LCDSHLoss(nn.Layer):
     """
     # paper [Locality-Constrained Deep Supervised Hashing for Image Retrieval](https://www.ijcai.org/Proceedings/2017/0499.pdf)
+    # code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/LCDSH.py
     """

     def __init__(self, n_class, _lambda):
@@ -100,6 +102,7 @@ class DCHLoss(paddle.nn.Layer):
     """
     # paper [Deep Cauchy Hashing for Hamming Space Retrieval] URL:(http://ise.thss.tsinghua.edu.cn/~mlong/doc/deep-cauchy-hashing-cvpr18.pdf)
+    # code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/DCH.py
     """

     def __init__(self, gamma, _lambda, n_class):
diff --git a/ppcls/loss/emlloss.py b/ppcls/loss/emlloss.py
index 973570389ac08e11b47449fbefbaa9e5e8e33c83..38b707fe1a4eb9ed6d130a3eb9bc4f8762d4c189 100644
--- a/ppcls/loss/emlloss.py
+++ b/ppcls/loss/emlloss.py
@@ -23,6 +23,11 @@ from .comfunc import rerange_index


 class EmlLoss(paddle.nn.Layer):
+    """Ensemble Metric Learning Loss
+    paper: [Large Scale Strongly Supervised Ensemble Metric Learning, with Applications to Face Verification and Retrieval](https://arxiv.org/pdf/1212.6094.pdf)
+    code reference: https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/metric_learning/losses/emlloss.py
+    """
+
     def __init__(self, batch_size=40, samples_each_class=2):
         super(EmlLoss, self).__init__()
         assert (batch_size % samples_each_class == 0)
diff --git a/ppcls/loss/googlenetloss.py b/ppcls/loss/googlenetloss.py
index c580aa61701be6f5b6be43ce52a31be363b40d95..491311831acf90e11474f0a82713ef096221eb7f 100644
--- a/ppcls/loss/googlenetloss.py
+++ b/ppcls/loss/googlenetloss.py
@@ -18,11 +18,13 @@ import paddle.nn.functional as F

 class GoogLeNetLoss(nn.Layer):
     """
     Cross entropy loss used after googlenet
+    reference paper: [Going Deeper with Convolutions](https://arxiv.org/pdf/1409.4842v1.pdf)
     """
+
     def __init__(self, epsilon=None):
         super().__init__()
-        assert (epsilon is None or epsilon <= 0 or epsilon >= 1), "googlenet is not support label_smooth"
-
+        assert (epsilon is None or epsilon <= 0 or
+                epsilon >= 1), "GoogLeNet does not support label_smooth"

     def forward(self, inputs, label):
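Note on the GoogLeNetLoss hunk: the guard above only rejects label smoothing; the forward pass (unchanged by this patch) sums the cross entropy of the main head with down-weighted auxiliary heads. A minimal runnable sketch of that combination, assuming the 0.3 auxiliary weights from the paper; the function name is illustrative only:

import paddle
import paddle.nn.functional as F

def googlenet_loss_sketch(inputs, label):
    # inputs: (main_logits, aux1_logits, aux2_logits); label: int64 tensor [N]
    input0, input1, input2 = inputs
    loss0 = F.cross_entropy(input0, label)
    loss1 = F.cross_entropy(input1, label)
    loss2 = F.cross_entropy(input2, label)
    # auxiliary classifiers are down-weighted by 0.3, as in the paper
    return loss0 + 0.3 * loss1 + 0.3 * loss2

logits = [paddle.randn([8, 10]) for _ in range(3)]
label = paddle.randint(0, 10, [8])
print(googlenet_loss_sketch(logits, label))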
diff --git a/ppcls/loss/msmloss.py b/ppcls/loss/msmloss.py
index 3aa0dd8bfb0cdc6f558ff9891f0e0000ef183fae..adf03ef8e03c942fd1f2635704b9929e439dc3f5 100644
--- a/ppcls/loss/msmloss.py
+++ b/ppcls/loss/msmloss.py
@@ -21,10 +21,12 @@ from .comfunc import rerange_index

 class MSMLoss(paddle.nn.Layer):
     """
-    MSMLoss Loss, based on triplet loss. USE P * K samples.
+    paper : [Margin Sample Mining Loss: A Deep Learning Based Method for Person Re-identification](https://arxiv.org/pdf/1710.00478.pdf)
+    code reference: https://github.com/michuanhaohao/keras_reid/blob/master/reid_tripletcls.py
+    Margin Sample Mining Loss, based on triplet loss. USE P * K samples.
     the batch size is fixed. Batch_size = P * K; but the K may vary between batches.
     same label gather together
-    
+
     supported_metrics = [
         'euclidean',
         'sqeuclidean',
@@ -41,7 +43,7 @@ class MSMLoss(paddle.nn.Layer):
         self.rerange_index = rerange_index(batch_size, samples_each_class)

     def forward(self, input, target=None):
-        #normalization 
+        #normalization
         features = input["features"]
         features = self._nomalize(features)
         samples_each_class = self.samples_each_class
@@ -53,7 +55,7 @@ class MSMLoss(paddle.nn.Layer):
             features, axis=0)
         similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)

-        #rerange 
+        #rerange
         tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
         tmp = paddle.gather(tmp, index=rerange_index)
         similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])
diff --git a/ppcls/loss/npairsloss.py b/ppcls/loss/npairsloss.py
index d4b359e88119a735442858cb8dbe3fa255add09a..131c799a48abb9507cfe7ae16dd2aa34bf8c8f25 100644
--- a/ppcls/loss/npairsloss.py
+++ b/ppcls/loss/npairsloss.py
@@ -5,6 +5,11 @@ import paddle

 class NpairsLoss(paddle.nn.Layer):
+    """Npairs Loss
+    paper: [Improved deep metric learning with multi-class N-pair loss objective](https://dl.acm.org/doi/10.5555/3157096.3157304)
+    code reference: https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/contrib/losses/metric_learning/npairs_loss
+    """
+
     def __init__(self, reg_lambda=0.01):
         super(NpairsLoss, self).__init__()
         self.reg_lambda = reg_lambda
diff --git a/ppcls/loss/pairwisecosface.py b/ppcls/loss/pairwisecosface.py
index beb806863bd171635452fd67993cc56404efe0b6..7f146dea5454b90e08a4790b2d95d4624c76bb0d 100644
--- a/ppcls/loss/pairwisecosface.py
+++ b/ppcls/loss/pairwisecosface.py
@@ -23,6 +23,11 @@ import paddle.nn.functional as F


 class PairwiseCosface(nn.Layer):
+    """
+    paper: [Circle Loss: A Unified Perspective of Pair Similarity Optimization](https://arxiv.org/abs/2002.10857)
+    code reference: https://github.com/leoluopy/circle-loss-demonstration/blob/main/circle_loss.py
+    """
+
     def __init__(self, margin, gamma):
         super(PairwiseCosface, self).__init__()
         self.margin = margin
@@ -36,8 +41,10 @@ class PairwiseCosface(nn.Layer):
         dist_mat = paddle.matmul(embedding, embedding, transpose_y=True)

         N = dist_mat.shape[0]
-        is_pos = targets.reshape([N,1]).expand([N,N]).equal(paddle.t(targets.reshape([N,1]).expand([N,N]))).astype('float')
-        is_neg = targets.reshape([N,1]).expand([N,N]).not_equal(paddle.t(targets.reshape([N,1]).expand([N,N]))).astype('float')
+        is_pos = targets.reshape([N, 1]).expand([N, N]).equal(
+            paddle.t(targets.reshape([N, 1]).expand([N, N]))).astype('float')
+        is_neg = targets.reshape([N, 1]).expand([N, N]).not_equal(
+            paddle.t(targets.reshape([N, 1]).expand([N, N]))).astype('float')

         # Mask scores related to itself
         is_pos = is_pos - paddle.eye(N, N)
@@ -46,10 +53,12 @@ class PairwiseCosface(nn.Layer):

         s_n = dist_mat * is_neg

         logit_p = -self.gamma * s_p + (-99999999.) * (1 - is_pos)
-        logit_n = self.gamma * (s_n + self.margin) + (-99999999.) * (1 - is_neg)
-
-        loss = F.softplus(paddle.logsumexp(logit_p, axis=1) + paddle.logsumexp(logit_n, axis=1)).mean()
-
-        return {"PairwiseCosface": loss}
+        logit_n = self.gamma * (s_n + self.margin) + (-99999999.) * (1 - is_neg)
+
+        loss = F.softplus(
+            paddle.logsumexp(logit_p, axis=1) +
+            paddle.logsumexp(logit_n, axis=1)).mean()
+
+        return {"PairwiseCosface": loss}
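The is_pos/is_neg rewrapping in pairwisecosface.py is behavior-preserving. As a quick standalone illustration of what those masks compute (a sketch using an explicit 'float32' dtype string rather than the file's 'float'):

import paddle

targets = paddle.to_tensor([0, 0, 1, 2])  # toy labels
N = targets.shape[0]
t = targets.reshape([N, 1]).expand([N, N])
# same-label mask, with the diagonal (self-pairs) removed
is_pos = t.equal(paddle.t(t)).astype('float32') - paddle.eye(N, N)
is_neg = t.not_equal(paddle.t(t)).astype('float32')
print(is_pos.numpy())
print(is_neg.numpy())
# the (-99999999.) terms in forward() then push the logits of masked-out
# pairs toward -inf, so logsumexp only aggregates over real pairs
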
diff --git a/ppcls/loss/rkdloss.py b/ppcls/loss/rkdloss.py
index e6ffea273431ec7105d0cdedd0225c40648d2660..aa6ae232438ed6d5a915ea982092f3711d2901c3 100644
--- a/ppcls/loss/rkdloss.py
+++ b/ppcls/loss/rkdloss.py
@@ -29,6 +29,7 @@ def pdist(e, squared=False, eps=1e-12):


 class RKdAngle(nn.Layer):
+    # paper : [Relational Knowledge Distillation](https://arxiv.org/abs/1904.05068?context=cs.LG)
     # reference: https://github.com/lenscloth/RKD/blob/master/metric/loss.py
     def __init__(self, target_size=None):
         super().__init__()
@@ -64,6 +65,7 @@ class RKdAngle(nn.Layer):


 class RkdDistance(nn.Layer):
+    # paper : [Relational Knowledge Distillation](https://arxiv.org/abs/1904.05068?context=cs.LG)
     # reference: https://github.com/lenscloth/RKD/blob/master/metric/loss.py
     def __init__(self, eps=1e-12, target_size=1):
         super().__init__()
diff --git a/ppcls/loss/supconloss.py b/ppcls/loss/supconloss.py
index 3dd33bc19e97ddb29966f55c4789b7a4ae81422b..753ceaf415b28793ebd1758c9cde84316d04e70b 100644
--- a/ppcls/loss/supconloss.py
+++ b/ppcls/loss/supconloss.py
@@ -4,6 +4,7 @@ from paddle import nn

 class SupConLoss(nn.Layer):
     """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
+    code reference: https://github.com/HobbitLong/SupContrast/blob/master/losses.py
     It also supports the unsupervised contrastive loss in SimCLR"""

     def __init__(self,
diff --git a/ppcls/loss/trihardloss.py b/ppcls/loss/trihardloss.py
index 132c604d51920786e89c331b3223884910e50fa8..96cb42cb46b018abc77286eb96ef5e44f20f67b0 100644
--- a/ppcls/loss/trihardloss.py
+++ b/ppcls/loss/trihardloss.py
@@ -22,10 +22,12 @@ from .comfunc import rerange_index

 class TriHardLoss(paddle.nn.Layer):
     """
+    paper: [In Defense of the Triplet Loss for Person Re-Identification](https://arxiv.org/abs/1703.07737)
+    code reference: https://github.com/VisualComputingInstitute/triplet-reid/blob/master/loss.py
     TriHard Loss, based on triplet loss. USE P * K samples.
     the batch size is fixed. Batch_size = P * K; but the K may vary between batches.
     same label gather together
-    
+
     supported_metrics = [
         'euclidean',
         'sqeuclidean',
@@ -45,7 +47,7 @@ class TriHardLoss(paddle.nn.Layer):
         features = input["features"]
         assert (self.batch_size == features.shape[0])

-        #normalization 
+        #normalization
         features = self._nomalize(features)
         samples_each_class = self.samples_each_class
         rerange_index = paddle.to_tensor(self.rerange_index)
@@ -56,7 +58,7 @@ class TriHardLoss(paddle.nn.Layer):
             features, axis=0)
         similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)

-        #rerange 
+        #rerange
         tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
         tmp = paddle.gather(tmp, index=rerange_index)
         similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])
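Both MSMLoss and TriHardLoss above rely on rerange_index to group same-class distances together before hard mining. A hedged sketch of the per-row mining step that follows the rearrangement; the function name, sample row, and margin value are illustrative, not taken from this patch:

import paddle
import paddle.nn.functional as F

def trihard_row_loss(row, samples_each_class, margin=0.1):
    # after the gather/reshape in forward(), each row of the rearranged
    # distance matrix holds the self-distance first, then the remaining
    # positives of the same class, then all negatives
    pos = row[1:samples_each_class]   # skip the self-distance at index 0
    neg = row[samples_each_class:]
    hard_pos = paddle.max(pos)        # furthest positive
    hard_neg = paddle.min(neg)        # closest negative
    return F.relu(hard_pos - hard_neg + margin)

row = paddle.to_tensor([0.0, 0.8, 0.7, 0.9, 1.5])
print(trihard_row_loss(row, samples_each_class=2))  # relu(0.8 - 0.7 + 0.1)
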
diff --git a/ppcls/loss/triplet.py b/ppcls/loss/triplet.py
index d1c7eec9e6031aa7e51a1a3575094e7d1a4f90df..458ee2e27d7b550fecfe16e5208047a8919b89d0 100644
--- a/ppcls/loss/triplet.py
+++ b/ppcls/loss/triplet.py
@@ -1,3 +1,17 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -8,6 +22,8 @@ import paddle.nn as nn

 class TripletLossV2(nn.Layer):
     """Triplet loss with hard positive/negative mining.
+    paper : [Facenet: A unified embedding for face recognition and clustering](https://arxiv.org/pdf/1503.03832.pdf)
+    code reference: https://github.com/okzhili/Cartoon-face-recognition/blob/master/loss/triplet_loss.py
     Args:
         margin (float): margin for triplet.
     """
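Finally, a usage sketch for TripletLossV2, assuming it follows the same dict-in/dict-out convention as the other losses touched by this patch (embeddings passed under input["features"], loss returned under the class name):

import paddle
from ppcls.loss.triplet import TripletLossV2

feats = paddle.randn([8, 128])                      # P * K = 4 ids x 2 samples
labels = paddle.to_tensor([0, 0, 1, 1, 2, 2, 3, 3])  # same-label samples adjacent
loss_fn = TripletLossV2(margin=0.5)
out = loss_fn({"features": feats}, labels)
print(out["TripletLossV2"])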