From c7a19f169c42a8a2aa0c15f648355a069fcde8c0 Mon Sep 17 00:00:00 2001
From: shangliang Xu
Date: Mon, 19 Jul 2021 11:01:42 +0800
Subject: [PATCH] fix some code (#3710)

---
 ppdet/modeling/heads/detr_head.py               | 3 +--
 ppdet/modeling/initializer.py                   | 4 +---
 ppdet/modeling/losses/detr_loss.py              | 4 ++--
 ppdet/modeling/transformers/detr_transformer.py | 4 ++--
 ppdet/modeling/transformers/utils.py            | 3 +--
 5 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/ppdet/modeling/heads/detr_head.py b/ppdet/modeling/heads/detr_head.py
index ec511654b..5b55642e4 100644
--- a/ppdet/modeling/heads/detr_head.py
+++ b/ppdet/modeling/heads/detr_head.py
@@ -16,13 +16,12 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import math
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
 from ppdet.core.workspace import register
 import pycocotools.mask as mask_util
-from ..initializer import *
+from ..initializer import linear_init_
 
 __all__ = ['DETRHead']
 
diff --git a/ppdet/modeling/initializer.py b/ppdet/modeling/initializer.py
index 0e9aef403..f7e83098b 100644
--- a/ppdet/modeling/initializer.py
+++ b/ppdet/modeling/initializer.py
@@ -50,9 +50,7 @@ def _no_grad_normal_(tensor, mean=0., std=1.):
 
 
 def _no_grad_fill_(tensor, value=0.):
     with paddle.no_grad():
-        v = paddle.rand(shape=tensor.shape, dtype=tensor.dtype)
-        v[...] = value
-        tensor.set_value(v)
+        tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype))
     return tensor
 
diff --git a/ppdet/modeling/losses/detr_loss.py b/ppdet/modeling/losses/detr_loss.py
index 24eed2e78..5a589d4a2 100644
--- a/ppdet/modeling/losses/detr_loss.py
+++ b/ppdet/modeling/losses/detr_loss.py
@@ -21,7 +21,7 @@ import paddle.nn as nn
 import paddle.nn.functional as F
 from ppdet.core.workspace import register
 from .iou_loss import GIoULoss
-from ..transformers import bbox_cxcywh_to_xyxy, bbox_overlaps, sigmoid_focal_loss
+from ..transformers import bbox_cxcywh_to_xyxy, sigmoid_focal_loss
 
 __all__ = ['DETRLoss']
 
@@ -211,7 +211,7 @@ class DETRLoss(nn.Layer):
             num_gts = paddle.clip(
                 num_gts / paddle.distributed.get_world_size(), min=1).item()
         except:
-            num_gts = max(num_gts, 1)
+            num_gts = max(num_gts.item(), 1)
         total_loss = dict()
         total_loss.update(
             self._get_loss_class(logits[-1], gt_class, match_indices,
diff --git a/ppdet/modeling/transformers/detr_transformer.py b/ppdet/modeling/transformers/detr_transformer.py
index 9069ee8c4..65eb12855 100644
--- a/ppdet/modeling/transformers/detr_transformer.py
+++ b/ppdet/modeling/transformers/detr_transformer.py
@@ -23,8 +23,8 @@ import paddle.nn.functional as F
 from ppdet.core.workspace import register
 from ..layers import MultiHeadAttention, _convert_attention_mask
 from .position_encoding import PositionEmbedding
-from .utils import *
-from ..initializer import *
+from .utils import _get_clones
+from ..initializer import linear_init_, conv_init_, xavier_uniform_, normal_
 
 __all__ = ['DETRTransformer']
 
diff --git a/ppdet/modeling/transformers/utils.py b/ppdet/modeling/transformers/utils.py
index d8abad9fb..5756cfe85 100644
--- a/ppdet/modeling/transformers/utils.py
+++ b/ppdet/modeling/transformers/utils.py
@@ -54,5 +54,4 @@ def sigmoid_focal_loss(logit, label, normalizer=1.0, alpha=0.25, gamma=2.0):
     if alpha >= 0:
         alpha_t = alpha * label + (1 - alpha) * (1 - label)
         loss = alpha_t * loss
-    return loss.mean(1).sum() / normalizer if normalizer > 1. else loss.mean(
-        1).sum()
+    return loss.mean(1).sum() / normalizer
--
GitLab
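
Notes on the changes (illustrative sketches, not part of the patch):

The initializer.py hunk replaces a rand-then-overwrite pattern with a single
paddle.full_like call. A minimal standalone sketch of the before/after
behavior, assuming Paddle 2.x dygraph mode:

    import paddle

    def fill_old(tensor, value=0.):
        # Old approach: allocate a random tensor, overwrite every element,
        # then copy it back. The paddle.rand call is wasted work and is
        # limited to floating-point dtypes.
        with paddle.no_grad():
            v = paddle.rand(shape=tensor.shape, dtype=tensor.dtype)
            v[...] = value
            tensor.set_value(v)
        return tensor

    def fill_new(tensor, value=0.):
        # New approach: build the constant tensor directly in one call.
        with paddle.no_grad():
            tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype))
        return tensor

    w = paddle.ones([2, 3])
    print(fill_new(w, 0.5))  # every element is now 0.5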
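
The detr_loss.py hunk fixes the fallback branch that runs when the
distributed call fails. There num_gts is still a one-element paddle.Tensor,
so Python's max(num_gts, 1) can return either the Tensor or the int depending
on the values, while the distributed branch always returns a Python scalar
via .item(). Calling .item() before max makes both branches yield a plain
number. A sketch, assuming num_gts is a one-element float tensor:

    import paddle

    num_gts = paddle.to_tensor([0.])  # e.g. a batch with no ground-truth boxes

    # Before: max() compares the Tensor against the int 1, so the result type
    # depends on the value (the Tensor when num_gts > 1, the int otherwise).
    mixed = max(num_gts, 1)

    # After: extract the Python scalar first; the result is always a plain
    # number, matching the .item() call in the distributed branch.
    num_gts = max(num_gts.item(), 1)
    print(num_gts)  # 1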
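
The utils.py hunk drops the normalizer > 1. guard so sigmoid_focal_loss
always divides by the normalizer; the old conditional silently skipped
normalization whenever normalizer <= 1, giving an inconsistent loss scale.
The sketch below shows the patched function in full; only the final return
line appears in the patch, so the body above it is reconstructed from the
hunk header plus the standard focal-loss formulation and should be treated
as an assumption rather than the verbatim ppdet source:

    import paddle.nn.functional as F

    def sigmoid_focal_loss(logit, label, normalizer=1.0, alpha=0.25, gamma=2.0):
        # Per-element binary cross entropy, down-weighted by (1 - p_t)^gamma
        # so that easy examples contribute less (standard focal-loss weighting).
        prob = F.sigmoid(logit)
        ce_loss = F.binary_cross_entropy_with_logits(
            logit, label, reduction='none')
        p_t = prob * label + (1 - prob) * (1 - label)
        loss = ce_loss * ((1 - p_t)**gamma)
        if alpha >= 0:
            alpha_t = alpha * label + (1 - alpha) * (1 - label)
            loss = alpha_t * loss
        # Patched return: always normalize, not only when normalizer > 1.
        return loss.mean(1).sum() / normalizer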