# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import cv2
import glob
import paddle
import numpy as np
from collections import defaultdict

from ppdet.core.workspace import create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
from ppdet.modeling.mot.utils import MOTTimer, load_det_results, write_mot_results, save_vis_results

from ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric
from ppdet.metrics import MCMOTMetric
import ppdet.utils.stats as stats

from .callbacks import Callback, ComposeCallback

from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)

__all__ = ['Tracker']


class Tracker(object):
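    """MOT tracking engine built on a ppdet config. It runs inference and
    evaluation for JDE/FairMOT models (joint detection and embedding) and
    DeepSORT models (separate detector and ReID model).

    Minimal usage sketch (the config path, weight file and dataset paths
    below are placeholders, not part of this module):

        from ppdet.core.workspace import load_config
        cfg = load_config('configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml')
        tracker = Tracker(cfg, mode='eval')
        tracker.load_weights_jde('output/fairmot_dla34/model_final.pdparams')
        tracker.mot_evaluate(data_root='dataset/mot/MOT16/images/train',
                             seqs=['MOT16-02'], output_dir='output')
    """
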
    def __init__(self, cfg, mode='eval'):
        self.cfg = cfg
        assert mode.lower() in ['test', 'eval'], \
                "mode should be 'test' or 'eval'"
        self.mode = mode.lower()
        self.optimizer = None

        # build MOT data loader
        self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]

        # build model
        self.model = create(cfg.architecture)

        self.status = {}
        self.start_epoch = 0

        # initial default callbacks
        self._init_callbacks()

        # initial default metrics
        self._init_metrics()
        self._reset_metrics()

    def _init_callbacks(self):
        self._callbacks = []
        self._compose_callback = None

    def _init_metrics(self):
        if self.mode in ['test']:
            self._metrics = []
            return

        if self.cfg.metric == 'MOT':
            self._metrics = [MOTMetric(), ]
        elif self.cfg.metric == 'MCMOT':
            self._metrics = [MCMOTMetric(self.cfg.num_classes), ]
        elif self.cfg.metric == 'KITTI':
            self._metrics = [KITTIMOTMetric(), ]
        else:
            logger.warning("Metric not support for metric type {}".format(
                self.cfg.metric))
            self._metrics = []

    def _reset_metrics(self):
        for metric in self._metrics:
            metric.reset()

    def register_callbacks(self, callbacks):
        callbacks = [h for h in list(callbacks) if h is not None]
        for c in callbacks:
            assert isinstance(c, Callback), \
                    "callbacks should be instances of a subclass of Callback"
        self._callbacks.extend(callbacks)
        self._compose_callback = ComposeCallback(self._callbacks)

    def register_metrics(self, metrics):
        metrics = [m for m in list(metrics) if m is not None]
        for m in metrics:
            assert isinstance(m, Metric), \
                    "metrics should be instances of a subclass of Metric"
        self._metrics.extend(metrics)

    def load_weights_jde(self, weights):
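        """Load a single checkpoint for JDE/FairMOT style models, where one
        set of weights covers both detection and the embedding head."""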
        load_weight(self.model, weights, self.optimizer)

    def load_weights_sde(self, det_weights, reid_weights):
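        """Load weights for DeepSORT style models. The detector and the ReID
        model use separate checkpoints; if no detector is built, only the
        ReID weights are loaded and detections are read from result files."""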
        if self.model.detector:
            load_weight(self.model.detector, det_weights)
            load_weight(self.model.reid, reid_weights)
        else:
            load_weight(self.model.reid, reid_weights, self.optimizer)

    def _eval_seq_jde(self,
                      dataloader,
                      save_dir=None,
                      show_image=False,
                      frame_rate=30,
                      draw_threshold=0):
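        """Track one sequence with a JDE/FairMOT style model: each forward
        pass yields detections and embeddings together, which are fed to the
        JDE tracker. Returns (results, frame count, average time per frame,
        number of timer calls).
        """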
        if save_dir:
            if not os.path.exists(save_dir): os.makedirs(save_dir)
        tracker = self.model.tracker
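        # scale the lost-track buffer to the sequence frame rate (track_buffer is defined at 30 FPS)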
        tracker.max_time_lost = int(frame_rate / 30.0 * tracker.track_buffer)

        timer = MOTTimer()
        frame_id = 0
        self.status['mode'] = 'track'
        self.model.eval()
        results = defaultdict(list)  # support single class and multi classes

        for step_id, data in enumerate(dataloader):
            self.status['step_id'] = step_id
            if frame_id % 40 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))
            # forward
            timer.tic()
            pred_dets, pred_embs = self.model(data)

            pred_dets, pred_embs = pred_dets.numpy(), pred_embs.numpy()
            online_targets_dict = self.model.tracker.update(pred_dets,
                                                            pred_embs)
            online_tlwhs = defaultdict(list)
            online_scores = defaultdict(list)
            online_ids = defaultdict(list)
            for cls_id in range(self.cfg.num_classes):
                online_targets = online_targets_dict[cls_id]
                for t in online_targets:
                    tlwh = t.tlwh
                    tid = t.track_id
                    tscore = t.score
                    if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
                    if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
                            3] > tracker.vertical_ratio:
                        continue
                    online_tlwhs[cls_id].append(tlwh)
                    online_ids[cls_id].append(tid)
                    online_scores[cls_id].append(tscore)
                # save results
                results[cls_id].append(
                    (frame_id + 1, online_tlwhs[cls_id], online_scores[cls_id],
                     online_ids[cls_id]))

            timer.toc()
            save_vis_results(data, frame_id, online_ids, online_tlwhs,
                             online_scores, timer.average_time, show_image,
                             save_dir, self.cfg.num_classes)
            frame_id += 1

        return results, frame_id, timer.average_time, timer.calls

    def _eval_seq_sde(self,
                      dataloader,
                      save_dir=None,
                      show_image=False,
                      frame_rate=30,
                      scaled=False,
                      det_file='',
                      draw_threshold=0):
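        """Track one sequence with a DeepSORT style model: detections come
        from the attached detector or, when there is no detector, from an
        offline detection file (`det_file`); ReID crops are then embedded and
        the tracker is updated frame by frame. Returns (results, frame count,
        average time per frame, number of timer calls).
        """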
        if save_dir:
            if not os.path.exists(save_dir): os.makedirs(save_dir)
        use_detector = bool(self.model.detector)

        timer = MOTTimer()
        results = []
        frame_id = 0
        self.status['mode'] = 'track'
        self.model.eval()
        self.model.reid.eval()
        if not use_detector:
            dets_list = load_det_results(det_file, len(dataloader))
            logger.info('Finish loading detection results file {}.'.format(
                det_file))

        for step_id, data in enumerate(dataloader):
            self.status['step_id'] = step_id
            if frame_id % 40 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            ori_image = data['ori_image']
            input_shape = data['image'].shape[2:]
            im_shape = data['im_shape']
            scale_factor = data['scale_factor']

            # forward
            timer.tic()
            if not use_detector:
                dets = dets_list[frame_id]
                bbox_tlwh = paddle.to_tensor(dets['bbox'], dtype='float32')
                if bbox_tlwh.shape[0] > 0:
                    # detector outputs: pred_cls_ids, pred_scores, pred_bboxes
                    pred_cls_ids = paddle.to_tensor(
                        dets['cls_id'], dtype='float32').unsqueeze(1)
                    pred_scores = paddle.to_tensor(
                        dets['score'], dtype='float32').unsqueeze(1)
                    pred_bboxes = paddle.concat(
                        (bbox_tlwh[:, 0:2],
                         bbox_tlwh[:, 2:4] + bbox_tlwh[:, 0:2]),
                        axis=1)
                else:
                    logger.warning(
                        'Frame {} has no object, try to modify the score threshold.'.
                        format(frame_id))
                    frame_id += 1
                    continue
            else:
                outs = self.model.detector(data)
                if outs['bbox_num'] > 0:
                    # detector outputs: pred_cls_ids, pred_scores, pred_bboxes
                    pred_cls_ids = outs['bbox'][:, 0:1]
                    pred_scores = outs['bbox'][:, 1:2]
                    if not scaled:
                        # `scaled` indicates whether the detector's output
                        # coords have already been scaled back to the original
                        # image: True for a general detector, False for JDE YOLOv3.
                        pred_bboxes = scale_coords(outs['bbox'][:, 2:],
                                                   input_shape, im_shape,
                                                   scale_factor)
                    else:
                        pred_bboxes = outs['bbox'][:, 2:]
                else:
                    logger.warning(
                        'Frame {} has no object, try to modify the score threshold.'.
                        format(frame_id))
                    frame_id += 1
                    continue

            pred_xyxys, keep_idx = clip_box(pred_bboxes, input_shape, im_shape,
                                            scale_factor)
            pred_scores = paddle.gather_nd(pred_scores, keep_idx).unsqueeze(1)
            pred_cls_ids = paddle.gather_nd(pred_cls_ids, keep_idx).unsqueeze(1)
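            # convert boxes from x1y1x2y2 to tlwh and stack [tlwh, score, cls_id] as the tracker input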
            pred_tlwhs = paddle.concat(
                (pred_xyxys[:, 0:2],
                 pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),
                axis=1)
            pred_dets = paddle.concat(
                (pred_tlwhs, pred_scores, pred_cls_ids), axis=1)

            tracker = self.model.tracker
            crops = get_crops(
                pred_xyxys,
                ori_image,
                w=tracker.input_size[0],
                h=tracker.input_size[1])
            crops = paddle.to_tensor(crops)

            data.update({'crops': crops})
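            # run the ReID model on the crops to get appearance embeddings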
            pred_embs = self.model(data)

            tracker.predict()
            online_targets = tracker.update(pred_dets, pred_embs)

            online_tlwhs, online_scores, online_ids = [], [], []
            for t in online_targets:
                if not t.is_confirmed() or t.time_since_update > 1:
                    continue
                tlwh = t.to_tlwh()
                tscore = t.score
                tid = t.track_id
                if tscore < draw_threshold: continue
                if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
                if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
                        3] > tracker.vertical_ratio:
                    continue
                online_tlwhs.append(tlwh)
                online_scores.append(tscore)
                online_ids.append(tid)
            timer.toc()

            # save results
            results.append(
                (frame_id + 1, online_tlwhs, online_scores, online_ids))
            save_vis_results(data, frame_id, online_ids, online_tlwhs,
                             online_scores, timer.average_time, show_image,
                             save_dir, self.cfg.num_classes)
            frame_id += 1

        return results, frame_id, timer.average_time, timer.calls

    def mot_evaluate(self,
                     data_root,
                     seqs,
                     output_dir,
                     data_type='mot',
                     model_type='JDE',
                     save_images=False,
                     save_videos=False,
                     show_image=False,
                     scaled=False,
                     det_results_dir=''):
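        """Run tracking over the sequences under `data_root`, write
        MOT-format result files to `<output_dir>/mot_results`, and evaluate
        them with the configured metrics (MOT/MCMOT/KITTI).
        """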
        if not os.path.exists(output_dir): os.makedirs(output_dir)
        result_root = os.path.join(output_dir, 'mot_results')
        if not os.path.exists(result_root): os.makedirs(result_root)
        assert data_type in ['mot', 'mcmot', 'kitti'], \
            "data_type should be 'mot', 'mcmot' or 'kitti'"
        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"

        # run tracking
        n_frame = 0
        timer_avgs, timer_calls = [], []
        for seq in seqs:
            infer_dir = os.path.join(data_root, seq)
            if not os.path.exists(infer_dir) or not os.path.isdir(infer_dir):
                logger.warning("Seq {} error, {} has no images.".format(
                    seq, infer_dir))
                continue
            if os.path.exists(os.path.join(infer_dir, 'img1')):
                infer_dir = os.path.join(infer_dir, 'img1')

            frame_rate = 30
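            # use the sequence's native frame rate from seqinfo.ini when available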
            seqinfo = os.path.join(data_root, seq, 'seqinfo.ini')
            if os.path.exists(seqinfo):
                meta_info = open(seqinfo).read()
                frame_rate = int(meta_info[meta_info.find('frameRate') + 10:
                                           meta_info.find('\nseqLength')])

            save_dir = os.path.join(output_dir, 'mot_outputs',
                                    seq) if save_images or save_videos else None
            logger.info('Starting to track seq: {}'.format(seq))

            self.dataset.set_images(self.get_infer_images(infer_dir))
            dataloader = create('EvalMOTReader')(self.dataset, 0)

            result_filename = os.path.join(result_root, '{}.txt'.format(seq))

            with paddle.no_grad():
                if model_type in ['JDE', 'FairMOT']:
                    results, nf, ta, tc = self._eval_seq_jde(
                        dataloader,
                        save_dir=save_dir,
                        show_image=show_image,
                        frame_rate=frame_rate)
                elif model_type in ['DeepSORT']:
                    results, nf, ta, tc = self._eval_seq_sde(
                        dataloader,
                        save_dir=save_dir,
                        show_image=show_image,
                        frame_rate=frame_rate,
                        scaled=scaled,
                        det_file=os.path.join(det_results_dir,
                                              '{}.txt'.format(seq)))
                else:
                    raise ValueError(model_type)

            write_mot_results(result_filename, results, data_type,
                              self.cfg.num_classes)
            n_frame += nf
            timer_avgs.append(ta)
            timer_calls.append(tc)

            if save_videos:
                output_video_path = os.path.join(save_dir, '..',
                                                 '{}_vis.mp4'.format(seq))
                cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
                    save_dir, output_video_path)
                os.system(cmd_str)
                logger.info('Save video in {}.'.format(output_video_path))

            logger.info('Evaluate seq: {}'.format(seq))
            # update metrics
            for metric in self._metrics:
                metric.update(data_root, seq, data_type, result_root,
                              result_filename)

        timer_avgs = np.asarray(timer_avgs)
        timer_calls = np.asarray(timer_calls)
        all_time = np.dot(timer_avgs, timer_calls)
        avg_time = all_time / np.sum(timer_calls)
        logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
            all_time, 1.0 / avg_time))

        # accumulate metric to log out
        for metric in self._metrics:
            metric.accumulate()
            metric.log()
        # reset metric states, as metrics may be evaluated multiple times
        self._reset_metrics()

    def get_infer_images(self, infer_dir):
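        """Collect and sort all images (jpg/jpeg/png/bmp) under `infer_dir`."""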
        assert infer_dir is not None and os.path.isdir(infer_dir), \
            "infer_dir {} is not a directory".format(infer_dir)
        images = set()
        exts = ['jpg', 'jpeg', 'png', 'bmp']
        exts += [ext.upper() for ext in exts]
        for ext in exts:
            images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
        images = list(images)
        images.sort()
        assert len(images) > 0, "no image found in {}".format(infer_dir)
        logger.info("Found {} inference images in total.".format(len(images)))
        return images

    def mot_predict(self,
                    video_file,
                    frame_rate,
                    image_dir,
                    output_dir,
                    data_type='mot',
                    model_type='JDE',
                    save_images=False,
                    save_videos=True,
                    show_image=False,
                    scaled=False,
                    det_results_dir='',
                    draw_threshold=0.5):
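        """Run tracking on a single video file or image folder, write
        MOT-format results, and optionally save visualized frames and an
        ffmpeg-encoded video.
        """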
        assert video_file is not None or image_dir is not None, \
            "--video_file or --image_dir should be set."
        assert video_file is None or os.path.isfile(video_file), \
                "{} is not a file".format(video_file)
        assert image_dir is None or os.path.isdir(image_dir), \
                "{} is not a directory".format(image_dir)

        if not os.path.exists(output_dir): os.makedirs(output_dir)
        result_root = os.path.join(output_dir, 'mot_results')
        if not os.path.exists(result_root): os.makedirs(result_root)
        assert data_type in ['mot', 'mcmot', 'kitti'], \
            "data_type should be 'mot', 'mcmot' or 'kitti'"
        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"

        # run tracking
        if video_file:
            seq = video_file.split('/')[-1].split('.')[0]
            self.dataset.set_video(video_file, frame_rate)
            logger.info('Starting to track video {}'.format(video_file))
        elif image_dir:
            seq = image_dir.split('/')[-1].split('.')[0]
            images = [
                '{}/{}'.format(image_dir, x) for x in os.listdir(image_dir)
            ]
            images.sort()
            self.dataset.set_images(images)
            logger.info('Starting to track folder {}, found {} images'.format(
                image_dir, len(images)))
        else:
            raise ValueError('--video_file or --image_dir should be set.')

        save_dir = os.path.join(output_dir, 'mot_outputs',
                                seq) if save_images or save_videos else None

        dataloader = create('TestMOTReader')(self.dataset, 0)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
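        # frame_rate == -1 means use the frame rate reported by the dataset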
        if frame_rate == -1:
            frame_rate = self.dataset.frame_rate

        with paddle.no_grad():
            if model_type in ['JDE', 'FairMOT']:
                results, nf, ta, tc = self._eval_seq_jde(
                    dataloader,
                    save_dir=save_dir,
                    show_image=show_image,
                    frame_rate=frame_rate,
                    draw_threshold=draw_threshold)
            elif model_type in ['DeepSORT']:
                results, nf, ta, tc = self._eval_seq_sde(
                    dataloader,
                    save_dir=save_dir,
                    show_image=show_image,
                    frame_rate=frame_rate,
                    scaled=scaled,
                    det_file=os.path.join(det_results_dir,
                                          '{}.txt'.format(seq)),
                    draw_threshold=draw_threshold)
            else:
                raise ValueError(model_type)

        write_mot_results(result_filename, results, data_type,
                          self.cfg.num_classes)

        if save_videos:
            output_video_path = os.path.join(save_dir, '..',
                                             '{}_vis.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
                save_dir, output_video_path)
            os.system(cmd_str)
            logger.info('Save video in {}'.format(output_video_path))