Unverified commit 1ba91c0e, authored by Feng Ni and committed by GitHub

[MOT] fix jde head (#4652)

Parent a3aa1fb5
@@ -16,8 +16,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import paddle
-from ppdet.modeling.mot.utils import scale_coords
 from ppdet.core.workspace import register, create
 from .meta_arch import BaseArch
@@ -73,8 +71,11 @@ class JDE(BaseArch):
             emb_feats = det_outs['emb_feats']
             loss_confs = det_outs['det_losses']['loss_confs']
             loss_boxes = det_outs['det_losses']['loss_boxes']
-            jde_losses = self.reid(emb_feats, self.inputs, loss_confs,
-                                   loss_boxes)
+            jde_losses = self.reid(
+                emb_feats,
+                self.inputs,
+                loss_confs=loss_confs,
+                loss_boxes=loss_boxes)
             return jde_losses
         else:
             if self.metric == 'MOTDet':
@@ -84,32 +85,18 @@ class JDE(BaseArch):
                 }
                 return det_results
-            elif self.metric == 'ReID':
-                emb_feats = det_outs['emb_feats']
-                embs_and_gts = self.reid(emb_feats, self.inputs, test_emb=True)
-                return embs_and_gts
             elif self.metric == 'MOT':
                 emb_feats = det_outs['emb_feats']
-                emb_outs = self.reid(emb_feats, self.inputs)
+                bboxes = det_outs['bbox']
                 boxes_idx = det_outs['boxes_idx']
-                bbox = det_outs['bbox']
-                input_shape = self.inputs['image'].shape[2:]
-                im_shape = self.inputs['im_shape']
-                scale_factor = self.inputs['scale_factor']
-                bbox[:, 2:] = scale_coords(bbox[:, 2:], input_shape, im_shape,
-                                           scale_factor)
                 nms_keep_idx = det_outs['nms_keep_idx']
-                pred_dets = paddle.concat((bbox[:, 2:], bbox[:, 1:2], bbox[:, 0:1]), axis=1)
-                emb_valid = paddle.gather_nd(emb_outs, boxes_idx)
-                pred_embs = paddle.gather_nd(emb_valid, nms_keep_idx)
+                pred_dets, pred_embs = self.reid(
+                    emb_feats,
+                    self.inputs,
+                    bboxes=bboxes,
+                    boxes_idx=boxes_idx,
+                    nms_keep_idx=nms_keep_idx)
                 return pred_dets, pred_embs
             else:
...
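For context, here is a minimal standalone sketch (toy tensors, not repo code) of the column reorder the MOT branch performs before handing detections to the tracker. It assumes each row of det_outs['bbox'] is [cls_id, score, x1, y1, x2, y2], as the bbox[:, 2:], bbox[:, 1:2], bbox[:, 0:1] slices suggest:

import paddle

# Assumed row layout: [cls_id, score, x1, y1, x2, y2]
bbox = paddle.to_tensor([[0., 0.9, 10., 20., 50., 80.],
                         [0., 0.7, 30., 40., 90., 120.]])
# Same concat as the MOT branch: reorder to [x1, y1, x2, y2, score, cls_id]
pred_dets = paddle.concat((bbox[:, 2:], bbox[:, 1:2], bbox[:, 0:1]), axis=1)
print(pred_dets.numpy())
# [[ 10.  20.  50.  80.   0.9   0. ]
#  [ 30.  40.  90. 120.   0.7   0. ]]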
@@ -61,13 +61,9 @@ class FairMOTEmbeddingHead(nn.Layer):
         if num_classes == 1:
             nID = self.num_identities_dict[0]  # single class
             self.classifier = nn.Linear(
-                ch_emb,
-                nID,
-                weight_attr=param_attr,
-                bias_attr=bias_attr)
+                ch_emb, nID, weight_attr=param_attr, bias_attr=bias_attr)
             # When num_identities(nID) is 1, emb_scale is set as 1
-            self.emb_scale = math.sqrt(2) * math.log(
-                nID - 1) if nID > 1 else 1
+            self.emb_scale = math.sqrt(2) * math.log(nID - 1) if nID > 1 else 1
         else:
             self.classifiers = dict()
             self.emb_scale_dict = dict()
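A quick numeric check of the collapsed emb_scale expression (a standalone sketch, not repo code): sqrt(2) * ln(nID - 1) when nID > 1, with a fallback of 1 otherwise.

import math

for nID in (1, 2, 100, 1000):
    emb_scale = math.sqrt(2) * math.log(nID - 1) if nID > 1 else 1
    print(nID, round(emb_scale, 3))
# 1 -> 1 (fallback), 2 -> 0.0, 100 -> 6.499, 1000 -> 9.768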
@@ -84,7 +80,7 @@ class FairMOTEmbeddingHead(nn.Layer):
             input_shape = input_shape[0]
         return {'in_channels': input_shape.channels}

-    def process_by_class(self, det_outs, embedding, bbox_inds, topk_clses):
+    def process_by_class(self, bboxes, embedding, bbox_inds, topk_clses):
         pred_dets, pred_embs = [], []
         for cls_id in range(self.num_classes):
             inds_masks = topk_clses == cls_id
@@ -97,8 +93,8 @@ class FairMOTEmbeddingHead(nn.Layer):
             cls_inds_mask = inds_masks > 0
             bbox_mask = paddle.nonzero(cls_inds_mask)
-            cls_det_outs = paddle.gather_nd(det_outs, bbox_mask)
-            pred_dets.append(cls_det_outs)
+            cls_bboxes = paddle.gather_nd(bboxes, bbox_mask)
+            pred_dets.append(cls_bboxes)
             cls_inds = paddle.masked_select(bbox_inds, cls_inds_mask)
             cls_inds = cls_inds.unsqueeze(-1)
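A self-contained sketch of this per-class selection pattern with invented toy values: paddle.nonzero turns the class mask into [n, 1] indices for gather_nd over the box rows, while masked_select keeps the matching flat indices into the embedding map.

import paddle

topk_clses = paddle.to_tensor([0, 1, 0, 1])   # predicted class per detection
bboxes = paddle.rand([4, 6])                  # one row per detection
bbox_inds = paddle.to_tensor([5, 9, 11, 20])  # flat indices into embedding

cls_id = 0
cls_inds_mask = topk_clses == cls_id              # bool mask over detections
bbox_mask = paddle.nonzero(cls_inds_mask)         # [[0], [2]]
cls_bboxes = paddle.gather_nd(bboxes, bbox_mask)  # rows 0 and 2
cls_inds = paddle.masked_select(bbox_inds, cls_inds_mask)
print(cls_bboxes.shape, cls_inds.numpy())         # [2, 6] [ 5 11]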
@@ -108,12 +104,12 @@ class FairMOTEmbeddingHead(nn.Layer):
         return paddle.concat(pred_dets), paddle.concat(pred_embs)

     def forward(self,
-                feat,
+                neck_feat,
                 inputs,
-                det_outs=None,
+                bboxes=None,
                 bbox_inds=None,
                 topk_clses=None):
-        reid_feat = self.reid(feat)
+        reid_feat = self.reid(neck_feat)
         if self.training:
             if self.num_classes == 1:
                 loss = self.get_loss(reid_feat, inputs)
@@ -121,18 +117,18 @@ class FairMOTEmbeddingHead(nn.Layer):
                 loss = self.get_mc_loss(reid_feat, inputs)
             return loss
         else:
-            assert det_outs is not None and bbox_inds is not None
+            assert bboxes is not None and bbox_inds is not None
             reid_feat = F.normalize(reid_feat)
             embedding = paddle.transpose(reid_feat, [0, 2, 3, 1])
             embedding = paddle.reshape(embedding, [-1, self.ch_emb])
             # embedding shape: [bs * h * w, ch_emb]
             if self.num_classes == 1:
-                pred_dets = det_outs
+                pred_dets = bboxes
                 pred_embs = paddle.gather(embedding, bbox_inds)
             else:
                 pred_dets, pred_embs = self.process_by_class(
-                    det_outs, embedding, bbox_inds, topk_clses)
+                    bboxes, embedding, bbox_inds, topk_clses)
             return pred_dets, pred_embs

     def get_loss(self, feat, inputs):
...
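To make the single-class eval path concrete, a runnable sketch with dummy shapes (nothing here comes from the repo): the normalized reid map is flattened to [bs * h * w, ch_emb], then paddle.gather picks one embedding row per detection via the flat bbox_inds from the detector's top-k step.

import paddle
import paddle.nn.functional as F

bs, ch_emb, h, w = 1, 4, 3, 3
reid_feat = F.normalize(paddle.rand([bs, ch_emb, h, w]))  # L2 over channels
embedding = paddle.transpose(reid_feat, [0, 2, 3, 1])     # [bs, h, w, ch_emb]
embedding = paddle.reshape(embedding, [-1, ch_emb])       # [bs*h*w, ch_emb]

bbox_inds = paddle.to_tensor([0, 4, 8])  # flat spatial index per detection
pred_embs = paddle.gather(embedding, bbox_inds)
print(pred_embs.shape)  # [3, 4]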
@@ -17,6 +17,7 @@ from __future__ import division
 from __future__ import print_function

 import math
+import numpy as np
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
@@ -115,31 +116,58 @@ class JDEEmbeddingHead(nn.Layer):
     def forward(self,
                 identify_feats,
-                targets=None,
+                targets,
                 loss_confs=None,
                 loss_boxes=None,
-                test_emb=False):
+                bboxes=None,
+                boxes_idx=None,
+                nms_keep_idx=None):
+        assert self.num_classes == 1, 'JDE only supports single class MOT.'
         assert len(identify_feats) == self.anchor_levels
         ide_outs = []
         for feat, ide_head in zip(identify_feats, self.identify_outputs):
             ide_outs.append(ide_head(feat))

         if self.training:
-            assert targets != None
             assert len(loss_confs) == len(loss_boxes) == self.anchor_levels
             loss_ides = self.emb_loss(ide_outs, targets, self.emb_scale,
                                       self.classifier)
-            return self.jde_loss(loss_confs, loss_boxes, loss_ides,
-                                 self.loss_params_cls, self.loss_params_reg,
-                                 self.loss_params_ide, targets)
+            jde_losses = self.jde_loss(
+                loss_confs, loss_boxes, loss_ides, self.loss_params_cls,
+                self.loss_params_reg, self.loss_params_ide, targets)
+            return jde_losses
         else:
-            if test_emb:
-                assert targets != None
-                embs_and_gts = self.get_emb_and_gt_outs(ide_outs, targets)
-                return embs_and_gts
-            else:
-                emb_outs = self.get_emb_outs(ide_outs)
-                return emb_outs
+            assert bboxes is not None
+            assert boxes_idx is not None
+            assert nms_keep_idx is not None
+            emb_outs = self.get_emb_outs(ide_outs)
+            emb_valid = paddle.gather_nd(emb_outs, boxes_idx)
+            pred_embs = paddle.gather_nd(emb_valid, nms_keep_idx)
+
+            input_shape = targets['image'].shape[2:]
+            # input_shape: [h, w], before data transforms, set in model config
+            im_shape = targets['im_shape'][0].numpy()
+            # im_shape: [new_h, new_w], after data transforms
+            scale_factor = targets['scale_factor'][0].numpy()
+            bboxes[:, 2:] = self.scale_coords(bboxes[:, 2:], input_shape,
+                                              im_shape, scale_factor)
+            # tlwhs, scores, cls_ids
+            pred_dets = paddle.concat(
+                (bboxes[:, 2:], bboxes[:, 1:2], bboxes[:, 0:1]), axis=1)
+            return pred_dets, pred_embs
+
+    def scale_coords(self, coords, input_shape, im_shape, scale_factor):
+        ratio = scale_factor[0]
+        pad_w = (input_shape[1] - int(im_shape[1])) / 2
+        pad_h = (input_shape[0] - int(im_shape[0])) / 2
+        coords = paddle.cast(coords, 'float32')
+        coords[:, 0::2] -= pad_w
+        coords[:, 1::2] -= pad_h
+        coords[:, 0:4] /= ratio
+        coords[:, :4] = paddle.clip(
+            coords[:, :4], min=0, max=coords[:, :4].max())
+        return coords.round()

     def get_emb_and_gt_outs(self, ide_outs, targets):
         emb_and_gts = []
...
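A worked numeric example of the new scale_coords arithmetic (plain Python with invented shapes): with a 608x1088 letterboxed network input holding a 576x1024 resized image at ratio 0.8, the pads are (608-576)/2 = 16 and (1088-1024)/2 = 32, so a box is shifted by the pads, divided by the ratio, and lands back in original-image coordinates (a 720x1280 frame here).

# Invented shapes, mirroring the scale_coords math above
input_shape = (608, 1088)   # [h, w] of the padded network input
im_shape = (576, 1024)      # [new_h, new_w] after resize, before padding
ratio = 0.8                 # scale_factor[0]

pad_h = (input_shape[0] - int(im_shape[0])) / 2   # 16.0
pad_w = (input_shape[1] - int(im_shape[1])) / 2   # 32.0

x1, y1, x2, y2 = 132.0, 116.0, 532.0, 416.0       # box in network-input coords
x1, x2 = (x1 - pad_w) / ratio, (x2 - pad_w) / ratio
y1, y2 = (y1 - pad_h) / ratio, (y2 - pad_h) / ratio
print(x1, y1, x2, y2)  # 125.0 125.0 625.0 500.0 in the original frame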