From bd7838d01ff3c84364f3403a494281e1c5a2549f Mon Sep 17 00:00:00 2001
From: cnn
Date: Sun, 8 Aug 2021 00:59:55 +0800
Subject: [PATCH] fix detach bug (#3913)

---
 configs/dota/README.md                    |  3 +-
 configs/dota/s2anet_1x_spine.yml          |  1 +
 configs/dota/s2anet_alignconv_2x_dota.yml |  1 +
 configs/dota/s2anet_conv_2x_dota.yml      |  1 +
 ppdet/modeling/heads/s2anet_head.py       | 38 +++++++++--------------
 5 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/configs/dota/README.md b/configs/dota/README.md
index f1512932f..12c0da0da 100644
--- a/configs/dota/README.md
+++ b/configs/dota/README.md
@@ -121,7 +121,7 @@ python3.7 tools/infer.py -c configs/dota/s2anet_1x_spine.yml -o weights=output/s
 ### 5. DOTA Dataset Evaluation
 Run the following command; the prediction result for each image is saved under the `output` folder as a txt file with the same name.
 ```
-python3.7 tools/infer.py -c configs/dota/s2anet_1x_dota.yml -o weights=./weights/s2anet_1x_dota.pdparams --infer_dir=dota_test_images --draw_threshold=0.05 --save_txt=True --output_dir=output
+python3.7 tools/infer.py -c configs/dota/s2anet_alignconv_2x_dota.yml -o weights=./weights/s2anet_alignconv_2x_dota.pdparams --infer_dir=dota_test_images --draw_threshold=0.05 --save_txt=True --output_dir=output
 ```
 Please refer to [DOTA_devkit](https://github.com/CAPTAIN-WHU/DOTA_devkit) to generate the evaluation files, and to [DOTA Test](http://captain.whu.edu.cn/DOTAweb/tasks.html) for the evaluation file format: build a zip file with one txt file per class, where each line of a txt file has the format `image_id score x1 y1 x2 y2 x3 y3 x4 y4`, then submit it to the server for evaluation.
@@ -144,6 +144,7 @@ The input of the `multiclass_nms` operator in Paddle supports quadrilaterals, so at deployment time
 
 For the deployment tutorial, please refer to [Inference Deployment](../../deploy/README.md)
 
+**Note:** Because `paddle.detach` leads to incorrect shapes in the exported model during dynamic-to-static conversion, an `is_training` parameter has been added to the config files; set this parameter to `False` when exporting the model for inference deployment.
 
 ## Citations
 ```
diff --git a/configs/dota/s2anet_1x_spine.yml b/configs/dota/s2anet_1x_spine.yml
index 5cf215b54..6ebe668c8 100644
--- a/configs/dota/s2anet_1x_spine.yml
+++ b/configs/dota/s2anet_1x_spine.yml
@@ -27,3 +27,4 @@ S2ANetHead:
   reg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.05]
   cls_loss_weight: [1.05, 1.0]
   reg_loss_type: 'l1'
+  is_training: True
diff --git a/configs/dota/s2anet_alignconv_2x_dota.yml b/configs/dota/s2anet_alignconv_2x_dota.yml
index 48e2d2b38..1f2d2659d 100644
--- a/configs/dota/s2anet_alignconv_2x_dota.yml
+++ b/configs/dota/s2anet_alignconv_2x_dota.yml
@@ -24,3 +24,4 @@ S2ANetHead:
   reg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.05]
   cls_loss_weight: [1.05, 1.0]
   reg_loss_type: 'l1'
+  is_training: True
diff --git a/configs/dota/s2anet_conv_2x_dota.yml b/configs/dota/s2anet_conv_2x_dota.yml
index 3d95c566e..be25ef32a 100644
--- a/configs/dota/s2anet_conv_2x_dota.yml
+++ b/configs/dota/s2anet_conv_2x_dota.yml
@@ -21,3 +21,4 @@ S2ANetHead:
   use_sigmoid_cls: True
   reg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.1]
   cls_loss_weight: [1.1, 1.05]
+  is_training: True
diff --git a/ppdet/modeling/heads/s2anet_head.py b/ppdet/modeling/heads/s2anet_head.py
index 024592372..2db15d1de 100644
--- a/ppdet/modeling/heads/s2anet_head.py
+++ b/ppdet/modeling/heads/s2anet_head.py
@@ -228,9 +228,10 @@ class S2ANetHead(nn.Layer):
                  align_conv_size=3,
                  use_sigmoid_cls=True,
                  anchor_assign=RBoxAssigner().__dict__,
-                 reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.0],
-                 cls_loss_weight=[1.0, 1.0],
-                 reg_loss_type='l1'):
+                 reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1],
+                 cls_loss_weight=[1.1, 1.05],
+                 reg_loss_type='l1',
+                 is_training=True):
         super(S2ANetHead, self).__init__()
         self.stacked_convs = stacked_convs
         self.feat_in = feat_in
@@ -256,6 +257,7 @@ class S2ANetHead(nn.Layer):
         self.alpha = 1.0
         self.beta = 1.0
         self.reg_loss_type = reg_loss_type
+        self.is_training = is_training
 
         self.s2anet_head_out = None
@@ -446,10 +448,12 @@ class S2ANetHead(nn.Layer):
             init_anchors = self.rect2rbox(init_anchors)
             self.base_anchors_list.append(init_anchors)
 
-            fam_reg1 = fam_reg
-            fam_reg1.stop_gradient = True
-            refine_anchor = self.bbox_decode(fam_reg1, init_anchors)
-            #refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors)
+            if self.is_training:
+                refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors)
+            else:
+                fam_reg1 = fam_reg.clone()
+                fam_reg1.stop_gradient = True
+                refine_anchor = self.bbox_decode(fam_reg1, init_anchors)
 
             self.refine_anchor_list.append(refine_anchor)
@@ -615,19 +619,13 @@ class S2ANetHead(nn.Layer):
             iou = rbox_iou(fam_bbox_decode, bbox_gt_bboxes)
             iou = paddle.diag(iou)
 
-            if reg_loss_type == 'iou':
-                EPS = paddle.to_tensor(
-                    1e-8, dtype='float32', stop_gradient=True)
-                iou_factor = -1.0 * paddle.log(iou + EPS) / (fam_bbox + EPS)
-                iou_factor.stop_gradient = True
-                #fam_bbox = fam_bbox * iou_factor
-            elif reg_loss_type == 'gwd':
+            if reg_loss_type == 'gwd':
                 bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +
                                                       feat_anchor_num, :]
                 fam_bbox_total = self.gwd_loss(fam_bbox_decode,
                                                bbox_gt_bboxes_level)
                 fam_bbox_total = fam_bbox_total * feat_bbox_weights
-                fam_bbox_total = paddle.sum(fam_bbox_total)
+                fam_bbox_total = paddle.sum(fam_bbox_total) / num_total_samples
                 fam_bbox_losses.append(fam_bbox_total)
             st_idx += feat_anchor_num
@@ -735,19 +733,13 @@ class S2ANetHead(nn.Layer):
             iou = rbox_iou(odm_bbox_decode, bbox_gt_bboxes)
             iou = paddle.diag(iou)
 
-            if reg_loss_type == 'iou':
-                EPS = paddle.to_tensor(
-                    1e-8, dtype='float32', stop_gradient=True)
-                iou_factor = -1.0 * paddle.log(iou + EPS) / (odm_bbox + EPS)
-                iou_factor.stop_gradient = True
-                # odm_bbox = odm_bbox * iou_factor
-            elif reg_loss_type == 'gwd':
+            if reg_loss_type == 'gwd':
                 bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +
                                                       feat_anchor_num, :]
                 odm_bbox_total = self.gwd_loss(odm_bbox_decode,
                                                bbox_gt_bboxes_level)
                 odm_bbox_total = odm_bbox_total * feat_bbox_weights
-                odm_bbox_total = paddle.sum(odm_bbox_total)
+                odm_bbox_total = paddle.sum(odm_bbox_total) / num_total_samples
                 odm_bbox_losses.append(odm_bbox_total)
             st_idx += feat_anchor_num
--
GitLab
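
The snippet below is a minimal, standalone sketch of the gradient-blocking pattern this patch introduces into `S2ANetHead.forward`; it is not the real head. The `decode` function is a hypothetical stand-in for `S2ANetHead.bbox_decode` and the tensors are random placeholders; only the branch between `detach()` and clone-plus-`stop_gradient` mirrors the patched code. During dynamic-graph training, `detach()` keeps the refined anchors from back-propagating into the FAM regression branch; for export, the README note above states that `detach()` yields incorrect shapes during dynamic-to-static conversion, so a cloned tensor with `stop_gradient = True` is used instead.

```python
# Minimal sketch (assumptions: `decode` is a hypothetical stand-in for
# S2ANetHead.bbox_decode, and the tensors below are random placeholders).
import paddle


def decode(deltas, anchors):
    # Placeholder decode: simply offsets the anchors by the predicted deltas.
    return anchors + deltas


def refine_anchors(fam_reg, init_anchors, is_training=True):
    if is_training:
        # Dynamic-graph training: detach so gradients do not flow from the
        # refined anchors back into the FAM regression output.
        return decode(fam_reg.detach(), init_anchors)
    # Export / deployment: detach() breaks shape inference during
    # dynamic-to-static conversion, so block gradients by cloning the
    # tensor and setting stop_gradient instead.
    fam_reg1 = fam_reg.clone()
    fam_reg1.stop_gradient = True
    return decode(fam_reg1, init_anchors)


if __name__ == "__main__":
    init_anchors = paddle.rand([8, 5])
    fam_reg = paddle.rand([8, 5])
    fam_reg.stop_gradient = False

    train_out = refine_anchors(fam_reg, init_anchors, is_training=True)
    export_out = refine_anchors(fam_reg, init_anchors, is_training=False)

    # Both branches produce refined anchors that carry no gradient back
    # to fam_reg.
    print(train_out.stop_gradient, export_out.stop_gradient)  # True True
```

When exporting the model for inference deployment, set `is_training: False` under `S2ANetHead` in the config, as the README note in this patch describes.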