import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

# NOTE(review): retained for backward compatibility with any code importing it
# from this module; the losses below now follow the device of their input
# tensor instead of relying on this global flag.
use_cuda = torch.cuda.is_available()


class FocalLoss(nn.Module):
    r"""Implementation of the `focal loss
    <https://arxiv.org/abs/1708.02002>`_ for both binary and multiclass
    classification

    :math:`FL(p_t) = - \alpha (1 - p_t)^{\gamma} log(p_t)`

    where, for a case of a binary classification problem

    :math:`\begin{equation} p_t= \begin{cases}p, & \text{if $y=1$}.\\1-p,
    & \text{otherwise}. \end{cases} \end{equation}`

    Parameters
    ----------
    alpha: float
        Focal Loss ``alpha`` parameter
    gamma: float
        Focal Loss ``gamma`` parameter
    """

    def __init__(self, alpha: float = 0.25, gamma: float = 1.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def _get_weight(self, p: Tensor, t: Tensor) -> Tensor:
        # p_t: probability assigned to the true class for each sample
        pt = p * t + (1 - p) * (1 - t)
        # class-balancing weight: alpha for positives, (1 - alpha) for negatives
        w = self.alpha * t + (1 - self.alpha) * (1 - t)
        # the modulating factor (1 - p_t)^gamma down-weights easy examples;
        # detached so the weight is treated as a constant by autograd
        return (w * (1 - pt).pow(self.gamma)).detach()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        r"""
        Parameters
        ----------
        input: Tensor
            input tensor with predictions (not probabilities)
        target: Tensor
            target tensor with the actual classes

        Examples
        --------
        >>> import torch
        >>>
        >>> from pytorch_widedeep.losses import FocalLoss
        >>>
        >>> # BINARY
        >>> target = torch.tensor([0, 1, 0, 1]).view(-1, 1)
        >>> input = torch.tensor([[0.6, 0.7, 0.3, 0.8]]).t()
        >>> FocalLoss()(input, target)
        tensor(0.1762)
        >>>
        >>> # MULTICLASS
        >>> target = torch.tensor([1, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([[0.2, 0.5, 0.3], [0.8, 0.1, 0.1], [0.7, 0.2, 0.1]])
        >>> FocalLoss()(input, target)
        tensor(0.2573)
        """
        input_prob = torch.sigmoid(input)
        if input.size(1) == 1:
            # binary case with a single logit column: build the two-class
            # probability matrix [P(y=0), P(y=1)]
            input_prob = torch.cat([1 - input_prob, input_prob], dim=1)
            num_class = 2
        else:
            num_class = input_prob.size(1)
        # one-hot encode the targets. view(-1) rather than squeeze(): squeeze()
        # on a batch of size 1 yields a 0-dim tensor and silently drops the
        # batch dimension of the one-hot matrix. Building the identity matrix
        # directly on the input's device replaces the old global `use_cuda`
        # handling, which moved the target to GPU even for CPU inputs.
        binary_target = torch.eye(num_class, device=input.device)[
            target.view(-1).long()
        ].contiguous()
        weight = self._get_weight(input_prob, binary_target)
        return F.binary_cross_entropy(
            input_prob, binary_target, weight, reduction="mean"
        )


class MSLELoss(nn.Module):
    r"""Mean squared log error.

    Inputs and targets must be greater than -1 (in practice, non-negative)
    for the logarithm to be defined.
    """

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        r"""
        Parameters
        ----------
        input: Tensor
            input tensor with predictions (not probabilities)
        target: Tensor
            target tensor with the actual classes

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import MSLELoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> MSLELoss()(input, target)
        tensor(0.1115)
        """
        # log1p(x) == log(1 + x) but is numerically more accurate near zero
        return self.mse(torch.log1p(input), torch.log1p(target))


class RMSELoss(nn.Module):
    r"""Root mean squared error."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        r"""
        Parameters
        ----------
        input: Tensor
            input tensor with predictions (not probabilities)
        target: Tensor
            target tensor with the actual classes

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import RMSELoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> RMSELoss()(input, target)
        tensor(0.6964)
        """
        return torch.sqrt(self.mse(input, target))


class RMSLELoss(nn.Module):
    r"""Root mean squared log error.

    Inputs and targets must be greater than -1 (in practice, non-negative)
    for the logarithm to be defined.
    """

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        r"""
        Parameters
        ----------
        input: Tensor
            input tensor with predictions (not probabilities)
        target: Tensor
            target tensor with the actual classes

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import RMSLELoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> RMSLELoss()(input, target)
        tensor(0.3339)
        """
        # log1p(x) == log(1 + x) but is numerically more accurate near zero
        return torch.sqrt(self.mse(torch.log1p(input), torch.log1p(target)))