From b615336fe3a9ff740fdb1b889be7bc865b6d20fa Mon Sep 17 00:00:00 2001
From: Feng Ni
Date: Mon, 28 Mar 2022 22:43:30 +0800
Subject: [PATCH] add_fairmot_airplane (#5487)

---
 configs/mot/fairmot/README.md                 | 27 +++++++++++++--
 configs/mot/fairmot/README_cn.md              | 20 +++++++++++
 configs/mot/fairmot/_base_/fairmot_dla34.yml  |  2 ++
 .../mot/fairmot/_base_/fairmot_hardnet85.yml  |  2 ++
 .../_base_/fairmot_hrnetv2_w18_dlafpn.yml     |  2 ++
 .../fairmot_dla34_30e_1088x608_airplane.yml   | 33 +++++++++++++++++++
 6 files changed, 83 insertions(+), 3 deletions(-)
 create mode 100644 configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml

diff --git a/configs/mot/fairmot/README.md b/configs/mot/fairmot/README.md
index a1f32ff3b..25441f21c 100644
--- a/configs/mot/fairmot/README.md
+++ b/configs/mot/fairmot/README.md
@@ -61,9 +61,9 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | HarDNet-85 | 1088x608 | 74.7 | 70.7 | 3210 | 29790 | 109914 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_enhance_hardnet85_30e_1088x608.pdparams) | [config](./fairmot_enhance_hardnet85_30e_1088x608.yml) |

 **Notes:**
-  - FairMOT enhance used 8 GPUs for training, and the crowdhuman dataset is added to the train-set during training.
-  - For FairMOT enhance DLA-34 the batch size is 16 on each GPU,and trained for 60 epoches.
-  - For FairMOT enhance HarDNet-85 the batch size is 10 on each GPU,and trained for 30 epoches.
+ - FairMOT enhance used 8 GPUs for training, and the CrowdHuman dataset was added to the train-set during training.
+ - FairMOT enhance DLA-34 uses a batch size of 16 on each GPU and is trained for 60 epochs.
+ - FairMOT enhance HarDNet-85 uses a batch size of 10 on each GPU and is trained for 30 epochs.

 ### FairMOT light model
 ### Results on MOT-16 Test Set
@@ -100,6 +100,27 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
     low_conf_thres: 0.2
   ```

+### FairMOT transfer learning model
+
+### Results on GMOT-40 airplane subset
+| backbone | input shape | MOTA | IDF1 | IDS | FP | FN | FPS | download | config |
+| :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: |
+| DLA-34 | 1088x608 | 96.6 | 94.7 | 19 | 300 | 466 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_airplane.pdparams) | [config](./fairmot_dla34_30e_1088x608_airplane.yml) |
+
+**Notes:**
+ - The dataset for this model is the airplane subset extracted from the GMOT-40 dataset. The download link provided by the PaddleDetection team is ```wget https://bj.bcebos.com/v1/paddledet/data/mot/airplane.zip```. Unzip it into the ```dataset/mot``` directory, and then copy ```airplane.train``` to ```dataset/mot/image_lists```.
+ - The FairMOT model here uses the pedestrian FairMOT trained model as pre-training weights. The train-set is the complete airplane set, 4 video sequences in total, and it is also used for evaluation.
+ - When applying the model to track other object classes, you should modify ```min_box_area``` and ```vertical_ratio``` of the tracker in the corresponding config file, like this:
+  ```
+  JDETracker:
+    conf_thres: 0.4
+    tracked_thresh: 0.4
+    metric_type: cosine
+    min_box_area: 0   # 200 for pedestrian
+    vertical_ratio: 0 # 1.6 for pedestrian
+  ```
+
+
 ## Getting Start
 ### 1. Training
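The dataset note above gives the preparation steps in prose; the following shell sketch strings them together. It assumes the PaddleDetection repository root as the working directory and that ```airplane.train``` sits at the top level of the extracted archive (the note does not state the exact in-archive path), so adjust the copy step if your layout differs.

```bash
# Download the GMOT-40 airplane subset into dataset/mot, unzip it, and
# register the image list, as described in the README note above.
cd dataset/mot
wget https://bj.bcebos.com/v1/paddledet/data/mot/airplane.zip
unzip airplane.zip
mkdir -p image_lists
# Assumed location of airplane.train inside the archive; adjust if it differs.
cp airplane/airplane.train image_lists/
```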
diff --git a/configs/mot/fairmot/README_cn.md b/configs/mot/fairmot/README_cn.md
index b5ec21895..bb22459e8 100644
--- a/configs/mot/fairmot/README_cn.md
+++ b/configs/mot/fairmot/README_cn.md
@@ -97,6 +97,26 @@ PP-Tracking provides an AI Studio public project tutorial. Please refer to [PP-Tracking
     low_conf_thres: 0.2
   ```

+### FairMOT transfer learning model
+
+### Results on the GMOT-40 airplane subset
+| backbone | input shape | MOTA | IDF1 | IDS | FP | FN | FPS | download | config |
+| :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: |
+| DLA-34 | 1088x608 | 96.6 | 94.7 | 19 | 300 | 466 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_airplane.pdparams) | [config](./fairmot_dla34_30e_1088x608_airplane.yml) |
+
+**Notes:**
+ - The dataset for this model is the airplane subset extracted from the GMOT-40 dataset. The download link prepared by the PaddleDetection team is ```wget https://bj.bcebos.com/v1/paddledet/data/mot/airplane.zip```. Download and unzip it into the ```dataset/mot``` directory, and copy ```airplane.train``` to ```dataset/mot/image_lists```.
+ - This model is trained with the pedestrian FairMOT trained model as pre-training weights. The train-set is the complete airplane set, 4 video sequences in total, and evaluation is also performed on the full set.
+ - When tracking other object classes, change ```min_box_area``` and ```vertical_ratio``` in the tracker section of the corresponding config file, as shown below:
+  ```
+  JDETracker:
+    conf_thres: 0.4
+    tracked_thresh: 0.4
+    metric_type: cosine
+    min_box_area: 0   # 200 for pedestrian
+    vertical_ratio: 0 # 1.6 for pedestrian
+  ```
+
 ## Quick Start

 ### 1. Training
diff --git a/configs/mot/fairmot/_base_/fairmot_dla34.yml b/configs/mot/fairmot/_base_/fairmot_dla34.yml
index 37eab6563..b9f5c65a3 100644
--- a/configs/mot/fairmot/_base_/fairmot_dla34.yml
+++ b/configs/mot/fairmot/_base_/fairmot_dla34.yml
@@ -43,3 +43,5 @@ JDETracker:
   conf_thres: 0.4
   tracked_thresh: 0.4
   metric_type: cosine
+  min_box_area: 200
+  vertical_ratio: 1.6 # for pedestrian
diff --git a/configs/mot/fairmot/_base_/fairmot_hardnet85.yml b/configs/mot/fairmot/_base_/fairmot_hardnet85.yml
index d5dfedb4d..0924d5fcf 100644
--- a/configs/mot/fairmot/_base_/fairmot_hardnet85.yml
+++ b/configs/mot/fairmot/_base_/fairmot_hardnet85.yml
@@ -39,3 +39,5 @@ JDETracker:
   conf_thres: 0.4
   tracked_thresh: 0.4
   metric_type: cosine
+  min_box_area: 200
+  vertical_ratio: 1.6 # for pedestrian
diff --git a/configs/mot/fairmot/_base_/fairmot_hrnetv2_w18_dlafpn.yml b/configs/mot/fairmot/_base_/fairmot_hrnetv2_w18_dlafpn.yml
index 07f6a45be..36f761c6f 100644
--- a/configs/mot/fairmot/_base_/fairmot_hrnetv2_w18_dlafpn.yml
+++ b/configs/mot/fairmot/_base_/fairmot_hrnetv2_w18_dlafpn.yml
@@ -34,3 +34,5 @@ JDETracker:
   conf_thres: 0.4
   tracked_thresh: 0.4
   metric_type: cosine
+  min_box_area: 200
+  vertical_ratio: 1.6 # for pedestrian
diff --git a/configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml b/configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml
new file mode 100644
index 000000000..441947c95
--- /dev/null
+++ b/configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml
@@ -0,0 +1,33 @@
+_BASE_: [
+  'fairmot_dla34_30e_1088x608.yml',
+]
+pretrain_weights: https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams
+weights: output/fairmot_dla34_30e_1088x608_airplane/model_final
+
+JDETracker:
+  conf_thres: 0.4
+  tracked_thresh: 0.4
+  metric_type: cosine
+  min_box_area: 0
+  vertical_ratio: 0
+
+# for MOT training
+TrainDataset:
+  !MOTDataSet
+    dataset_dir: dataset/mot
+    image_lists: ['airplane.train']
+    data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']
+
+# for MOT evaluation
+# If you want to change the MOT evaluation dataset, please modify 'data_root'
+EvalMOTDataset:
+  !MOTImageFolder
+    dataset_dir: dataset/mot
+    data_root: airplane/images/train
+    keep_ori_im: False # set to True to save visualization images or video, or when used in DeepSORT
+
+# for MOT video inference
+TestMOTDataset:
+  !MOTImageFolder
+    dataset_dir: dataset/mot
+    keep_ori_im: True # set to True to save visualization images or video
-- 
GitLab
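As a usage sketch, the new config plugs into the standard PaddleDetection MOT entry points (```tools/train.py```, ```tools/eval_mot.py```, ```tools/infer_mot.py```) in the same way as the other FairMOT configs covered in the Getting Start section of the README; those entry points and flags are not shown in this patch and are assumed here, the GPU list and video filename are placeholders, and the weights URL is the one from the results table above.

```bash
# Train on the airplane subset (multi-GPU launch, following the other FairMOT configs).
python -m paddle.distributed.launch --log_dir=./fairmot_dla34_30e_1088x608_airplane/ --gpus 0,1,2,3,4,5,6,7 \
    tools/train.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml

# Evaluate on the sequences configured in EvalMOTDataset.
CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py \
    -c configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml \
    -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_airplane.pdparams

# Track a video (placeholder filename) and save the visualized result.
CUDA_VISIBLE_DEVICES=0 python tools/infer_mot.py \
    -c configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml \
    -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_airplane.pdparams \
    --video_file={your_video}.mp4 --save_videos
```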