diff --git a/configs/mot/README.md b/configs/mot/README.md
index 18284eb34c07682d5af98eb26d5e8886adc9bc81..f14975f9adc2f9b2f728d109782b10f53799d062 100644
--- a/configs/mot/README.md
+++ b/configs/mot/README.md
@@ -47,12 +47,11 @@ PP-Tracking 提供了简洁的GUI可视化界面,教程请参考[PP-Tracking
## 安装依赖
一键安装MOT相关的依赖:
```
-pip install lap sklearn motmetrics openpyxl cython_bbox
+pip install lap sklearn motmetrics openpyxl
或者
pip install -r requirements.txt
```
**注意:**
-- `cython_bbox`在windows上安装:`pip install -e git+https://github.com/samson-wang/cython_bbox.git#egg=cython-bbox`。可参考这个[教程](https://stackoverflow.com/questions/60349980/is-there-a-way-to-install-cython-bbox-for-windows)。
- 预测需确保已安装[ffmpeg](https://ffmpeg.org/ffmpeg.html), Linux(Ubuntu)平台可以直接用以下命令安装:`apt-get update && apt-get install -y ffmpeg`。
diff --git a/configs/mot/README_en.md b/configs/mot/README_en.md
index e23bc451b36cb100440d47e90351eb6d22879983..6cbdf3d66d1f614c4893d4854f455addea3085fa 100644
--- a/configs/mot/README_en.md
+++ b/configs/mot/README_en.md
@@ -49,12 +49,11 @@ PP-Tracking supports GUI predict and deployment. Please refer to this [doc](http
## Installation
Install all the related dependencies for MOT:
```
-pip install lap sklearn motmetrics openpyxl cython_bbox
+pip install lap sklearn motmetrics openpyxl
or
pip install -r requirements.txt
```
**Notes:**
-- Install `cython_bbox` for Windows: `pip install -e git+https://github.com/samson-wang/cython_bbox.git#egg=cython-bbox`. You can refer to this [tutorial](https://stackoverflow.com/questions/60349980/is-there-a-way-to-install-cython-bbox-for-windows).
- Please make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first, on Linux(Ubuntu) platform you can directly install it by the following command:`apt-get update && apt-get install -y ffmpeg`.
diff --git a/configs/mot/fairmot/README.md b/configs/mot/fairmot/README.md
index 25441f21cba40a5e7b26dbbab7627e8bb7097b2f..fbb9daa04e05b1f9848c03ef62f790ebeeee167e 100644
--- a/configs/mot/fairmot/README.md
+++ b/configs/mot/fairmot/README.md
@@ -86,7 +86,7 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
### Results on MOT-17 Half Set
| backbone | input shape | MOTA | IDF1 | IDS | FP | FN | FPS | download | config |
| :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: |
-| DLA-34 | 1088x608 | 69.1 | 72.8 | 299 | 1957 | 14412 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_bytetracker.pdparams) | [config](./fairmot_dla34_30e_1088x608.yml) |
+| DLA-34 | 1088x608 | 69.1 | 72.8 | 299 | 1957 | 14412 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [config](./fairmot_dla34_30e_1088x608.yml) |
| DLA-34 + BYTETracker| 1088x608 | 70.3 | 73.2 | 234 | 2176 | 13598 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_bytetracker.pdparams) | [config](./fairmot_dla34_30e_1088x608_bytetracker.yml) |
**Notes:**
diff --git a/configs/mot/fairmot/README_cn.md b/configs/mot/fairmot/README_cn.md
index bb22459e858c36414a13c142e38184df3899b7b4..dd5a27874e6c7439222ca9f8648099ca25bf9863 100644
--- a/configs/mot/fairmot/README_cn.md
+++ b/configs/mot/fairmot/README_cn.md
@@ -82,7 +82,7 @@ PP-Tracking 提供了AI Studio公开项目案例,教程请参考[PP-Tracking
### 在MOT-17 Half上结果
| 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 |
| :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: |
-| DLA-34 | 1088x608 | 69.1 | 72.8 | 299 | 1957 | 14412 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_bytetracker.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608.yml) |
+| DLA-34 | 1088x608 | 69.1 | 72.8 | 299 | 1957 | 14412 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608.yml) |
| DLA-34 + BYTETracker| 1088x608 | 70.3 | 73.2 | 234 | 2176 | 13598 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_bytetracker.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608_bytetracker.yml) |
diff --git a/configs/mot/fairmot/fairmot_dla34_30e_1088x608_bytetracker.yml b/configs/mot/fairmot/fairmot_dla34_30e_1088x608_bytetracker.yml
index 7b668c5687584a65e6895efe26454ca4418c7226..a0ad44a0f9a6ef12d3904f1d78ede896f917a90b 100644
--- a/configs/mot/fairmot/fairmot_dla34_30e_1088x608_bytetracker.yml
+++ b/configs/mot/fairmot/fairmot_dla34_30e_1088x608_bytetracker.yml
@@ -14,8 +14,18 @@ TrainDataset:
image_lists: ['mot17.half', 'caltech.all', 'cuhksysu.train', 'prw.train', 'citypersons.train', 'eth.train']
data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']
+# for MOT evaluation
+# If you want to change the MOT evaluation dataset, please modify 'data_root'
+EvalMOTDataset:
+ !MOTImageFolder
+ dataset_dir: dataset/mot
+ data_root: MOT17/images/half
+    keep_ori_im: False # set True to save visualization images or video, or when used in DeepSORT
+
JDETracker:
use_byte: True
match_thres: 0.8
conf_thres: 0.4
low_conf_thres: 0.2
+ min_box_area: 200
+ vertical_ratio: 1.6 # for pedestrian
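+  # use_byte enables BYTE-style association: detections scoring above conf_thres
+  # are matched first, and detections scoring between low_conf_thres and conf_thres
+  # are only used to recover otherwise unmatched tracks.
+  # min_box_area / vertical_ratio follow the FairMOT post-processing convention:
+  # boxes smaller than min_box_area or with width/height ratio above 1.6 are
+  # treated as non-pedestrian and dropped.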
diff --git a/configs/mot/pedestrian/README_cn.md b/configs/mot/pedestrian/README_cn.md
index eca2963c51872e000b7b9ab0e02770b1fc98b60a..768733db537c5f752bbb56198bad196c68b28602 100644
--- a/configs/mot/pedestrian/README_cn.md
+++ b/configs/mot/pedestrian/README_cn.md
@@ -18,7 +18,7 @@
| :-------------| :-------- | :------- | :----: | :----: | :----: | :-----: |:------: |
| PathTrack | DLA-34 | 1088x608 | 44.9 | 59.3 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_pathtrack.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608_pathtrack.yml) |
| VisDrone | DLA-34 | 1088x608 | 49.2 | 63.1 | - | [下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_visdrone_pedestrian.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608_visdrone_pedestrian.yml) |
-| VisDrone | HRNetv2-W18| 1088x608 | 40.5 | 54.7 | - | [下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian.yml) |
+| VisDrone | HRNetv2-W18| 1088x608 | 40.5 | 54.7 | - | [下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_pedestrian.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_pedestrian.yml) |
| VisDrone | HRNetv2-W18| 864x480 | 38.6 | 50.9 | - | [下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian.yml) |
| VisDrone | HRNetv2-W18| 576x320 | 30.6 | 47.2 | - | [下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_pedestrian.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_pedestrian.yml) |
@@ -124,8 +124,8 @@ month={Oct},}
@ARTICLE{9573394,
author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},
- journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
- title={Detection and Tracking Meet Drones Challenge},
+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+ title={Detection and Tracking Meet Drones Challenge},
year={2021},
volume={},
number={},
diff --git a/configs/picodet/README.md b/configs/picodet/README.md
index a226cc9a95e91b3e28635023996e201eed08e089..dfa15b5b2ab5525070dbdb9a46ba10335a414a62 100644
--- a/configs/picodet/README.md
+++ b/configs/picodet/README.md
@@ -1,60 +1,63 @@
-English | [简体中文](README_cn.md)
+简体中文 | [English](README_en.md)
# PP-PicoDet
![](../../docs/images/picedet_demo.jpeg)
-## News
+## 最新动态
-- Released a new series of PP-PicoDet models: **(2022.03.20)**
- - (1) It was used TAL/Task-aligned-Head and optimized PAN, which greatly improved the accuracy;
- - (2) Moreover optimized CPU prediction speed, and the training speed is greatly improved;
- - (3) The export model includes post-processing, and the prediction directly outputs the result, without secondary development, and the migration cost is lower.
+- 发布全新系列PP-PicoDet模型:**(2022.03.20)**
+ - (1)引入TAL及Task-aligned Head,优化PAN等结构,精度大幅提升;
+ - (2)优化CPU端预测速度,同时训练速度大幅提升;
+ - (3)导出模型将后处理包含在网络中,预测直接输出box结果,无需二次开发,迁移成本更低。
-### Legacy Model
+## 历史版本模型
-- Please refer to: [PicoDet 2021.10版本](./legacy_model/)
+- 详情请参考:[PicoDet 2021.10版本](./legacy_model/)
-## Introduction
+## 简介
-We developed a series of lightweight models, named `PP-PicoDet`. Because of the excellent performance, our models are very suitable for deployment on mobile or CPU. For more details, please refer to our [report on arXiv](https://arxiv.org/abs/2111.00902).
+PaddleDetection中提出了全新的轻量级系列模型`PP-PicoDet`,在移动端具有卓越的性能,成为全新SOTA轻量级模型。详细的技术细节可以参考我们的[arXiv技术报告](https://arxiv.org/abs/2111.00902)。
-- 🌟 Higher mAP: the **first** object detectors that surpass mAP(0.5:0.95) **30+** within 1M parameters when the input size is 416.
-- 🚀 Faster latency: 150FPS on mobile ARM CPU.
-- 😊 Deploy friendly: support PaddleLite/MNN/NCNN/OpenVINO and provide C++/Python/Android implementation.
-- 😍 Advanced algorithm: use the most advanced algorithms and offer innovation, such as ESNet, CSP-PAN, SimOTA with VFL, etc.
+PP-PicoDet模型有如下特点:
+
+- 🌟 更高的mAP: 第一个在1M参数量之内`mAP(0.5:0.95)`超越**30+**(输入416像素时)。
+- 🚀 更快的预测速度: 网络预测在ARM CPU下可达150FPS。
+- 😊 部署友好: 支持PaddleLite/MNN/NCNN/OpenVINO等预测库,支持转出ONNX,提供了C++/Python/Android的demo。
+- 😍 先进的算法: 我们在现有SOTA算法中进行了创新, 包括:ESNet, CSP-PAN, SimOTA等等。
-## Benchmark
+## 基线
-| Model | Input size | mAPval 0.5:0.95 | mAPval 0.5 | Params (M) | FLOPS (G) | Latency[CPU](#latency) (ms) | Latency[Lite](#latency) (ms) | Download | Config |
+| 模型 | 输入尺寸 | mAPval 0.5:0.95 | mAPval 0.5 | 参数量 (M) | FLOPS (G) | 预测时延[CPU](#latency) (ms) | 预测时延[Lite](#latency) (ms) | 下载 | 配置文件 |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: | :----------------------------------------: | :--------------------------------------- |
| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 10.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_320_coco_lcnet.yml) |
| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 15.4ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_416_coco_lcnet.yml) |
| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 12.6ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco_lcnet.yml) |
-| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20 | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) |
+| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) |
| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 14.5ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco_lcnet.yml) |
| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 19.5ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco_lcnet.yml) |
| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 18.3ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco_lcnet.yml) |
| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 22.1ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco_lcnet.yml) |
-| PicoDet-L | 640*640 | 42.3 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) |
+| PicoDet-L | 640*640 | 42.6 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) |
+
-Table Notes:
+注意事项:
-- Latency: All our models test on `Intel-Xeon-Gold-6148` CPU with MKLDNN by 10 threads and `Qualcomm Snapdragon 865(4xA77+4xA55)` with 4 threads by arm8 and with FP16. In the above table, test CPU latency on Paddle-Inference and testing Mobile latency with `Lite`->[Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite).
-- PicoDet is trained on COCO train2017 dataset and evaluated on COCO val2017. And PicoDet used 4 GPUs for training and all checkpoints are trained with default settings and hyperparameters.
-- Benchmark test: When testing the speed benchmark, the post-processing is not included in the exported model, you need to set `-o export.benchmark=True` or manually modify [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12).
+- 时延测试: 我们所有的模型都在英特尔至强6148的CPU(MKLDNN 10线程)和`骁龙865(4xA77+4xA55)`的ARM CPU上测试(4线程,FP16预测)。上面表格中标有`CPU`的是使用Paddle Inference库测试,标有`Lite`的是使用[Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite)进行测试。
+- PicoDet在COCO train2017上训练,并且在COCO val2017上进行验证。使用4卡GPU训练,并且上表所有的预训练模型都是通过发布的默认配置训练得到。
+- Benchmark测试:测试速度benchmark性能时,导出模型后处理不包含在网络中,需要设置`-o export.benchmark=True` 或手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12)。
-#### Benchmark of Other Models
+#### 其他模型的基线
-| Model | Input size | mAPval 0.5:0.95 | mAPval 0.5 | Params (M) | FLOPS (G) | Latency[NCNN](#latency) (ms) |
+| 模型 | 输入尺寸 | mAPval 0.5:0.95 | mAPval 0.5 | 参数量 (M) | FLOPS (G) | 预测时延[NCNN](#latency) (ms) |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: |
| YOLOv3-Tiny | 416*416 | 16.6 | 33.1 | 8.86 | 5.62 | 25.42 |
| YOLOv4-Tiny | 416*416 | 21.7 | 40.2 | 6.06 | 6.96 | 23.69 |
@@ -68,38 +71,39 @@ We developed a series of lightweight models, named `PP-PicoDet`. Because of the
| YOLOv5n | 640*640 | 28.4 | 46.0 | 1.9 | 4.5 | 40.35 |
| YOLOv5s | 640*640 | 37.2 | 56.0 | 7.2 | 16.5 | 78.05 |
-- Testing Mobile latency with code: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark).
+- ARM测试的benchmark脚本来自: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark)。
-## Quick Start
+## 快速开始
-Requirements:
+依赖包:
-- PaddlePaddle >= 2.2.1
+- PaddlePaddle == 2.2.2
-Installation
+安装
-- [Installation guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md)
-- [Prepare dataset](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/PrepareDataSet_en.md)
+- [安装指导文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md)
+- [准备数据文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/PrepareDataSet_en.md)
-Training and Evaluation
+训练&评估
-- Training model on single-GPU:
+- 单卡GPU上训练:
```shell
# training on single-GPU
export CUDA_VISIBLE_DEVICES=0
python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval
```
-If the GPU is out of memory during training, reduce the batch_size in TrainReader, and reduce the base_lr in LearningRate proportionally.
-- Training model on multi-GPU:
+**注意:** 如果训练时显存不足(out of memory),请将TrainReader中batch_size调小,同时LearningRate中base_lr等比例减小。我们发布的config均由4卡训练得到,如果将GPU卡数改为1,那么base_lr需要减小4倍。
+
+- 多卡GPU上训练:
```shell
@@ -108,31 +112,31 @@ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval
```
-- Evaluation:
+- 评估:
```shell
python tools/eval.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
```
-- Infer:
+- 测试:
```shell
python tools/infer.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
```
-Detail also can refer to [Quick start guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md).
+详情请参考[快速开始文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md).
-## Deployment
+## 部署
-### Export and Convert Model
+### 导出及转换模型
-1. Export model (click to expand)
+1. 导出模型 (点击展开)
```shell
cd PaddleDetection
@@ -141,18 +145,21 @@ python tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
--output_dir=inference_model
```
+- 如无需导出后处理,请指定:`-o export.benchmark=True`(如果-o已出现过,此处删掉-o)或者手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml) 中相应字段。
+- 如无需导出NMS,请指定:`-o export.nms=False`或者手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml) 中相应字段。
+
-2. Convert to PaddleLite (click to expand)
+2. 转换模型至Paddle Lite (点击展开)
-- Install Paddlelite>=2.10:
+- 安装Paddlelite>=2.10:
```shell
pip install paddlelite
```
-- Convert model:
+- 转换模型至Paddle Lite格式:
```shell
# FP32
@@ -164,16 +171,16 @@ paddle_lite_opt --model_dir=inference_model/picodet_s_320_coco_lcnet --valid_tar
-3. Convert to ONNX (click to expand)
+3. 转换模型至ONNX (点击展开)
-- Install [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 and ONNX > 1.10.1, for details, please refer to [Tutorials of Export ONNX Model](../../deploy/EXPORT_ONNX_MODEL.md)
+- 安装[Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 并且 ONNX > 1.10.1, 细节请参考[导出ONNX模型教程](../../deploy/EXPORT_ONNX_MODEL.md)
```shell
pip install onnx
-pip install paddle2onnx
+pip install paddle2onnx==0.9.2
```
-- Convert model:
+- 转换模型:
```shell
paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \
@@ -183,22 +190,22 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \
--save_file picodet_s_320_coco.onnx
```
-- Simplify ONNX model: use onnx-simplifier to simplify onnx model.
+- 简化ONNX模型: 使用`onnx-simplifier`库来简化ONNX模型。
- - Install onnx-simplifier >= 0.3.6:
+ - 安装 onnx-simplifier >= 0.3.6:
```shell
pip install onnx-simplifier
```
- - simplify onnx model:
+ - 简化ONNX模型:
```shell
python -m onnxsim picodet_s_320_coco.onnx picodet_s_processed.onnx
```
-- Deploy models
+- 部署用的模型
-| Model | Input size | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) |
+| 模型 | 输入尺寸 | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: |
| PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) |
| PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) |
@@ -212,31 +219,28 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \
| PicoDet-LCNet 1.5x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_lcnet_1_5x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x_fp16.tar) |
-### Deploy
+### 部署
- PaddleInference demo [Python](../../deploy/python) & [C++](../../deploy/cpp)
- [PaddleLite C++ demo](../../deploy/lite)
-- [NCNN C++/Python demo](../../deploy/third_engine/demo_ncnn)
-- [MNN C++/Python demo](../../deploy/third_engine/demo_mnn)
-- [OpenVINO C++ demo](../../deploy/third_engine/demo_openvino)
- [Android demo(Paddle Lite)](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/android/app/cxx/picodet_detection_demo)
-Android demo visualization:
+Android demo可视化:
-## Quantization
+## 量化
-Requirements:
+依赖包:
- PaddlePaddle >= 2.2.2
- PaddleSlim >= 2.2.1
-**Install:**
+**安装:**
```shell
pip install paddleslim==2.2.1
@@ -245,61 +249,61 @@ pip install paddleslim==2.2.1
-Quant aware (click to expand)
+量化训练 (点击展开)
-Configure the quant config and start training:
+开始量化训练:
```shell
python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
--slim_config configs/slim/quant/picodet_s_quant.yml --eval
```
-- More detail can refer to [slim document](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim)
+- 更多细节请参考[slim文档](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim)
-Post quant (click to expand)
+离线量化 (点击展开)
-Configure the post quant config and start calibrate model:
+校准及导出量化模型:
```shell
python tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
--slim_config configs/slim/post_quant/picodet_s_ptq.yml
```
-- Notes: Now the accuracy of post quant is abnormal and this problem is being solved.
+- 注意: 离线量化模型精度问题正在解决中.
-## Unstructured Pruning
+## 非结构化剪枝
-Toturial:
+教程:
-Please refer this [documentation](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/pruner/README.md) for details such as requirements, training and deployment.
+训练及部署细节请参考[非结构化剪枝文档](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/pruner/README.md)。
-## Application
+## 应用
-- **Pedestrian detection:** model zoo of `PicoDet-S-Pedestrian` please refer to [PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B)
+- **行人检测:** `PicoDet-S-Pedestrian`行人检测模型请参考[PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B)
-- **Mainbody detection:** model zoo of `PicoDet-L-Mainbody` please refer to [mainbody detection](./application/mainbody_detection/README.md)
+- **主体检测:** `PicoDet-L-Mainbody`主体检测模型请参考[主体检测文档](./application/mainbody_detection/README.md)
## FAQ
-Out of memory error.
+显存爆炸(Out of memory error)
-Please reduce the `batch_size` of `TrainReader` in config.
+请减小配置文件中`TrainReader`的`batch_size`。
-How to transfer learning.
+如何迁移学习
-Please reset `pretrain_weights` in config, which trained on coco. Such as:
+请重新设置配置文件中的`pretrain_weights`字段,比如利用COCO上训好的模型在自己的数据上继续训练:
```yaml
pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams
```
@@ -307,17 +311,17 @@ pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcne
-The transpose operator is time-consuming on some hardware.
+`transpose`算子在某些硬件上耗时严重
-Please use `PicoDet-LCNet` model, which has fewer `transpose` operators.
+请使用`PicoDet-LCNet`模型,`transpose`较少。
-How to count model parameters.
+如何计算模型参数量。
-You can insert below code at [here](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) to count learnable parameters.
+可以将以下代码插入:[trainer.py](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) 来计算参数量。
```python
params = sum([
@@ -329,8 +333,8 @@ print('params: ', params)
-## Cite PP-PicoDet
-If you use PicoDet in your research, please cite our work by using the following BibTeX entry:
+## 引用PP-PicoDet
+如果需要在你的研究中使用PP-PicoDet,请通过以下方式引用我们的技术报告:
```
@misc{yu2021pppicodet,
title={PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices},
diff --git a/configs/picodet/README_cn.md b/configs/picodet/README_en.md
similarity index 65%
rename from configs/picodet/README_cn.md
rename to configs/picodet/README_en.md
index 7131200a2e106e50fe71a97eda566a4520bfc5e8..dfadb3abff17b35749b2047c3872c7c8293db2f7 100644
--- a/configs/picodet/README_cn.md
+++ b/configs/picodet/README_en.md
@@ -1,63 +1,60 @@
-简体中文 | [English](README.md)
+English | [简体中文](README.md)
# PP-PicoDet
![](../../docs/images/picedet_demo.jpeg)
-## 最新动态
+## News
-- 发布全新系列PP-PicoDet模型:**(2022.03.20)**
- - (1)引入TAL及Task-aligned Head,优化PAN等结构,精度大幅提升;
- - (2)优化CPU端预测速度,同时训练速度大幅提升;
- - (3)导出模型将后处理包含在网络中,预测直接输出box结果,无需二次开发,迁移成本更低。
+- Released a new series of PP-PicoDet models: **(2022.03.20)**
+  - (1) Introduced TAL/Task-aligned-Head and an optimized PAN, which greatly improve accuracy;
+  - (2) Optimized CPU inference speed, and training speed is also greatly improved;
+  - (3) The exported model includes post-processing, so prediction directly outputs the final results without secondary development, lowering the migration cost.
-## 历史版本模型
+### Legacy Model
-- 详情请参考:[PicoDet 2021.10版本](./legacy_model/)
+- Please refer to: [PicoDet 2021.10](./legacy_model/)
-## 简介
+## Introduction
-PaddleDetection中提出了全新的轻量级系列模型`PP-PicoDet`,在移动端具有卓越的性能,成为全新SOTA轻量级模型。详细的技术细节可以参考我们的[arXiv技术报告](https://arxiv.org/abs/2111.00902)。
+We developed a series of lightweight models, named `PP-PicoDet`. Because of their excellent performance, these models are very suitable for deployment on mobile devices or CPUs. For more details, please refer to our [report on arXiv](https://arxiv.org/abs/2111.00902).
-PP-PicoDet模型有如下特点:
-
-- 🌟 更高的mAP: 第一个在1M参数量之内`mAP(0.5:0.95)`超越**30+**(输入416像素时)。
-- 🚀 更快的预测速度: 网络预测在ARM CPU下可达150FPS。
-- 😊 部署友好: 支持PaddleLite/MNN/NCNN/OpenVINO等预测库,支持转出ONNX,提供了C++/Python/Android的demo。
-- 😍 先进的算法: 我们在现有SOTA算法中进行了创新, 包括:ESNet, CSP-PAN, SimOTA等等。
+- 🌟 Higher mAP: the **first** object detector that surpasses mAP(0.5:0.95) **30+** within 1M parameters when the input size is 416.
+- 🚀 Faster latency: 150FPS on mobile ARM CPU.
+- 😊 Deploy friendly: supports PaddleLite/MNN/NCNN/OpenVINO, supports export to ONNX, and provides C++/Python/Android demos.
+- 😍 Advanced algorithms: we innovate on existing SOTA algorithms, such as ESNet, CSP-PAN, and SimOTA with VFL.
-## 基线
+## Benchmark
-| 模型 | 输入尺寸 | mAPval 0.5:0.95 | mAPval 0.5 | 参数量 (M) | FLOPS (G) | 预测时延[NCNN](#latency) (ms) | 预测时延[Lite](#latency) (ms) | 下载 | 配置文件 |
+| Model | Input size | mAPval 0.5:0.95 | mAPval 0.5 | Params (M) | FLOPS (G) | Latency[CPU](#latency) (ms) | Latency[Lite](#latency) (ms) | Download | Config |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: | :----------------------------------------: | :--------------------------------------- |
| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 10.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_320_coco_lcnet.yml) |
| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 15.4ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_416_coco_lcnet.yml) |
| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 12.6ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco_lcnet.yml) |
-| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20 | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) |
+| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) |
| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 14.5ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco_lcnet.yml) |
| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 19.5ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco_lcnet.yml) |
| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 18.3ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco_lcnet.yml) |
| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 22.1ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco_lcnet.yml) |
-| PicoDet-L | 640*640 | 42.3 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) |
-
+| PicoDet-L | 640*640 | 42.6 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) |
-注意事项:
+Table Notes:
-- 时延测试: 我们所有的模型都在英特尔至强6148的CPU(MKLDNN 10线程)和`骁龙865(4xA77+4xA55)`的ARM CPU上测试(4线程,FP16预测)。上面表格中标有`CPU`的是使用Paddle Inference库测试,标有`Lite`的是使用[Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite)进行测试。
-- PicoDet在COCO train2017上训练,并且在COCO val2017上进行验证。使用4卡GPU训练,并且上表所有的预训练模型都是通过发布的默认配置训练得到。
-- Benchmark测试:测试速度benchmark性能时,导出模型后处理不包含在网络中,需要设置`-o export.benchmark=True` 或手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12)。
+- Latency: All our models are tested on an `Intel-Xeon-Gold-6148` CPU with MKLDNN and 10 threads, and on a `Qualcomm Snapdragon 865(4xA77+4xA55)` ARM CPU with 4 threads (armv8, FP16). In the table above, CPU latency is tested with Paddle-Inference and mobile latency (`Lite`) with [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite).
+- PicoDet is trained on the COCO train2017 dataset and evaluated on COCO val2017. PicoDet used 4 GPUs for training, and all checkpoints are trained with the default settings and hyperparameters.
+- Benchmark test: when testing the speed benchmark, the post-processing is not included in the exported model; you need to set `-o export.benchmark=True` or manually modify [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12).
-#### 其他模型的基线
+#### Benchmark of Other Models
-| 模型 | 输入尺寸 | mAPval 0.5:0.95 | mAPval 0.5 | 参数量 (M) | FLOPS (G) | 预测时延[NCNN](#latency) (ms) |
+| Model | Input size | mAPval 0.5:0.95 | mAPval 0.5 | Params (M) | FLOPS (G) | Latency[NCNN](#latency) (ms) |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: |
| YOLOv3-Tiny | 416*416 | 16.6 | 33.1 | 8.86 | 5.62 | 25.42 |
| YOLOv4-Tiny | 416*416 | 21.7 | 40.2 | 6.06 | 6.96 | 23.69 |
@@ -71,39 +68,38 @@ PP-PicoDet模型有如下特点:
| YOLOv5n | 640*640 | 28.4 | 46.0 | 1.9 | 4.5 | 40.35 |
| YOLOv5s | 640*640 | 37.2 | 56.0 | 7.2 | 16.5 | 78.05 |
-- ARM测试的benchmark脚本来自: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark)。
+- Testing Mobile latency with code: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark).
-## 快速开始
+## Quick Start
-依赖包:
+Requirements:
-- PaddlePaddle == 2.2.2
+- PaddlePaddle >= 2.2.2
-安装
+Installation
-- [安装指导文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md)
-- [准备数据文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/PrepareDataSet_en.md)
+- [Installation guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md)
+- [Prepare dataset](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/PrepareDataSet_en.md)
-训练&评估
+Training and Evaluation
-- 单卡GPU上训练:
+- Training model on single-GPU:
```shell
# training on single-GPU
export CUDA_VISIBLE_DEVICES=0
python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval
```
+**Note:** If the GPU runs out of memory during training, reduce the `batch_size` in `TrainReader` and reduce `base_lr` in `LearningRate` proportionally. The released configs are all trained with 4 GPUs; if the number of GPUs is changed to 1, `base_lr` needs to be reduced by a factor of 4.
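+For example, the learning-rate section of a config could be adjusted like this (the 0.32 value is illustrative; check the actual `base_lr` of the config you use):
+
+```yaml
+LearningRate:
+  # released configs assume 4 GPUs; for a single GPU divide base_lr by 4,
+  # e.g. an illustrative 0.32 becomes 0.08
+  base_lr: 0.08
+```
+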
-如果训练时显存out memory,将TrainReader中batch_size调小,同时LearningRate中base_lr等比例减小。
-
-- 多卡GPU上训练:
+- Training model on multi-GPU:
```shell
@@ -112,31 +108,31 @@ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval
```
-- 评估:
+- Evaluation:
```shell
python tools/eval.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
```
-- 测试:
+- Infer:
```shell
python tools/infer.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
```
-详情请参考[快速开始文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md).
+For more details, please refer to the [Quick start guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md).
-## 部署
+## Deployment
-### 导出及转换模型
+### Export and Convert Model
-1. 导出模型 (点击展开)
+1. Export model (click to expand)
```shell
cd PaddleDetection
@@ -145,18 +141,22 @@ python tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
--output_dir=inference_model
```
+- If no post-processing is required, please specify: `-o export.benchmark=True` (if `-o` has already been used, omit the extra `-o` here) or manually modify the corresponding fields in [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml).
+- If no NMS is required, please specify: `-o export.nms=False` or manually modify the corresponding fields in [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml), as shown in the example below.
+
+
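+For example, a full export command that skips NMS could look like this (illustrative; adjust the config and weights to your own model):
+
+```shell
+python tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
+       -o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams export.nms=False \
+       --output_dir=inference_model
+```
+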
-2. 转换模型至Paddle Lite (点击展开)
+2. Convert to PaddleLite (click to expand)
-- 安装Paddlelite>=2.10:
+- Install Paddlelite>=2.10:
```shell
pip install paddlelite
```
-- 转换模型至Paddle Lite格式:
+- Convert model:
```shell
# FP32
@@ -168,16 +168,16 @@ paddle_lite_opt --model_dir=inference_model/picodet_s_320_coco_lcnet --valid_tar
-3. 转换模型至ONNX (点击展开)
+3. Convert to ONNX (click to expand)
-- 安装[Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 并且 ONNX > 1.10.1, 细节请参考[导出ONNX模型教程](../../deploy/EXPORT_ONNX_MODEL.md)
+- Install [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 and ONNX > 1.10.1, for details, please refer to [Tutorials of Export ONNX Model](../../deploy/EXPORT_ONNX_MODEL.md)
```shell
pip install onnx
-pip install paddle2onnx
+pip install paddle2onnx==0.9.2
```
-- 转换模型:
+- Convert model:
```shell
paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \
@@ -187,22 +187,22 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \
--save_file picodet_s_320_coco.onnx
```
-- 简化ONNX模型: 使用`onnx-simplifier`库来简化ONNX模型。
+- Simplify ONNX model: use onnx-simplifier to simplify onnx model.
- - 安装 onnx-simplifier >= 0.3.6:
+ - Install onnx-simplifier >= 0.3.6:
```shell
pip install onnx-simplifier
```
- - 简化ONNX模型:
+ - simplify onnx model:
```shell
python -m onnxsim picodet_s_320_coco.onnx picodet_s_processed.onnx
```
-- 部署用的模型
+- Deploy models
-| 模型 | 输入尺寸 | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) |
+| Model | Input size | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: |
| PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) |
| PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) |
@@ -216,31 +216,28 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \
| PicoDet-LCNet 1.5x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_lcnet_1_5x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x_fp16.tar) |
-### 部署
+### Deploy
- PaddleInference demo [Python](../../deploy/python) & [C++](../../deploy/cpp)
- [PaddleLite C++ demo](../../deploy/lite)
-- [NCNN C++/Python demo](../../deploy/third_engine/demo_ncnn)
-- [MNN C++/Python demo](../../deploy/third_engine/demo_mnn)
-- [OpenVINO C++ demo](../../deploy/third_engine/demo_openvino)
- [Android demo(Paddle Lite)](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/android/app/cxx/picodet_detection_demo)
-Android demo可视化:
+Android demo visualization:
-## 量化
+## Quantization
-依赖包:
+Requirements:
- PaddlePaddle >= 2.2.2
- PaddleSlim >= 2.2.1
-**安装:**
+**Install:**
```shell
pip install paddleslim==2.2.1
@@ -249,61 +246,61 @@ pip install paddleslim==2.2.1
-量化训练 (点击展开)
+Quant aware (click to expand)
-开始量化训练:
+Configure the quant config and start training:
```shell
python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
--slim_config configs/slim/quant/picodet_s_quant.yml --eval
```
-- 更多细节请参考[slim文档](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim)
+- More detail can refer to [slim document](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim)
-离线量化 (点击展开)
+Post quant (click to expand)
-校准及导出量化模型:
+Configure the post quant config and start calibrate model:
```shell
python tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
--slim_config configs/slim/post_quant/picodet_s_ptq.yml
```
-- 注意: 离线量化模型精度问题正在解决中.
+- Note: the accuracy of post quant is currently abnormal, and this problem is being solved.
-## 非结构化剪枝
+## Unstructured Pruning
-教程:
+Tutorial:
-训练及部署细节请参考[非结构化剪枝文档](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/pruner/README.md)。
+Please refer to this [documentation](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/pruner/README.md) for details such as requirements, training and deployment.
-## 应用
+## Application
-- **行人检测:** `PicoDet-S-Pedestrian`行人检测模型请参考[PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B)
+- **Pedestrian detection:** for the `PicoDet-S-Pedestrian` model zoo, please refer to [PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B)
-- **主体检测:** `PicoDet-L-Mainbody`主体检测模型请参考[主体检测文档](./application/mainbody_detection/README.md)
+- **Mainbody detection:** for the `PicoDet-L-Mainbody` model zoo, please refer to [mainbody detection](./application/mainbody_detection/README.md)
## FAQ
-显存爆炸(Out of memory error)
+Out of memory error.
-请减小配置文件中`TrainReader`的`batch_size`。
+Please reduce the `batch_size` of `TrainReader` in config.
-如何迁移学习
+How to do transfer learning.
-请重新设置配置文件中的`pretrain_weights`字段,比如利用COCO上训好的模型在自己的数据上继续训练:
+Please reset `pretrain_weights` in the config to a model trained on COCO, for example:
```yaml
pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams
```
@@ -311,17 +308,17 @@ pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcne
-`transpose`算子在某些硬件上耗时验证
+The transpose operator is time-consuming on some hardware.
-请使用`PicoDet-LCNet`模型,`transpose`较少。
+Please use `PicoDet-LCNet` model, which has fewer `transpose` operators.
-如何计算模型参数量。
+How to count model parameters.
-可以将以下代码插入:[trainer.py](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) 来计算参数量。
+You can insert the following code at [this line](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) to count the learnable parameters.
```python
params = sum([
@@ -333,8 +330,8 @@ print('params: ', params)
-## 引用PP-PicoDet
-如果需要在你的研究中使用PP-PicoDet,请通过一下方式引用我们的技术报告:
+## Cite PP-PicoDet
+If you use PicoDet in your research, please cite our work by using the following BibTeX entry:
```
@misc{yu2021pppicodet,
title={PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices},
diff --git a/deploy/cpp/src/object_detector.cc b/deploy/cpp/src/object_detector.cc
index a99fcd515337e72ff59a09c7eeaa12072a774cc1..e455c90aa9d7a3110357507e6199dcf76538a453 100644
--- a/deploy/cpp/src/object_detector.cc
+++ b/deploy/cpp/src/object_detector.cc
@@ -15,16 +15,15 @@
// for setprecision
#include
#include
-#include "include/object_detector.h"
-using namespace paddle_infer;
+#include "include/object_detector.h"
namespace PaddleDetection {
// Load Model and create model predictor
-void ObjectDetector::LoadModel(const std::string& model_dir,
+void ObjectDetector::LoadModel(const std::string &model_dir,
const int batch_size,
- const std::string& run_mode) {
+ const std::string &run_mode) {
paddle_infer::Config config;
std::string prog_file = model_dir + OS_PATH_SEP + "model.pdmodel";
std::string params_file = model_dir + OS_PATH_SEP + "model.pdiparams";
@@ -42,27 +41,22 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
} else if (run_mode == "trt_int8") {
precision = paddle_infer::Config::Precision::kInt8;
} else {
- printf(
- "run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or "
- "'trt_int8'");
+ printf("run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or "
+ "'trt_int8'");
}
// set tensorrt
- config.EnableTensorRtEngine(1 << 30,
- batch_size,
- this->min_subgraph_size_,
- precision,
- false,
- this->trt_calib_mode_);
+ config.EnableTensorRtEngine(1 << 30, batch_size, this->min_subgraph_size_,
+ precision, false, this->trt_calib_mode_);
// set use dynamic shape
if (this->use_dynamic_shape_) {
- // set DynamicShsape for image tensor
+ // set DynamicShape for image tensor
const std::vector min_input_shape = {
- 1, 3, this->trt_min_shape_, this->trt_min_shape_};
+ batch_size, 3, this->trt_min_shape_, this->trt_min_shape_};
const std::vector max_input_shape = {
- 1, 3, this->trt_max_shape_, this->trt_max_shape_};
+ batch_size, 3, this->trt_max_shape_, this->trt_max_shape_};
const std::vector opt_input_shape = {
- 1, 3, this->trt_opt_shape_, this->trt_opt_shape_};
+ batch_size, 3, this->trt_opt_shape_, this->trt_opt_shape_};
const std::map> map_min_input_shape = {
{"image", min_input_shape}};
const std::map> map_max_input_shape = {
@@ -70,8 +64,8 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
const std::map> map_opt_input_shape = {
{"image", opt_input_shape}};
- config.SetTRTDynamicShapeInfo(
- map_min_input_shape, map_max_input_shape, map_opt_input_shape);
+ config.SetTRTDynamicShapeInfo(map_min_input_shape, map_max_input_shape,
+ map_opt_input_shape);
std::cout << "TensorRT dynamic shape enabled" << std::endl;
}
}
@@ -96,12 +90,11 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
}
// Visualiztion MaskDetector results
-cv::Mat VisualizeResult(
- const cv::Mat& img,
- const std::vector& results,
- const std::vector& lables,
- const std::vector& colormap,
- const bool is_rbox = false) {
+cv::Mat
+VisualizeResult(const cv::Mat &img,
+ const std::vector &results,
+ const std::vector &lables,
+ const std::vector &colormap, const bool is_rbox = false) {
cv::Mat vis_img = img.clone();
for (int i = 0; i < results.size(); ++i) {
// Configure color and text size
@@ -142,24 +135,18 @@ cv::Mat VisualizeResult(
origin.y = results[i].rect[1];
// Configure text background
- cv::Rect text_back = cv::Rect(results[i].rect[0],
- results[i].rect[1] - text_size.height,
- text_size.width,
- text_size.height);
+ cv::Rect text_back =
+ cv::Rect(results[i].rect[0], results[i].rect[1] - text_size.height,
+ text_size.width, text_size.height);
// Draw text, and background
cv::rectangle(vis_img, text_back, roi_color, -1);
- cv::putText(vis_img,
- text,
- origin,
- font_face,
- font_scale,
- cv::Scalar(255, 255, 255),
- thickness);
+ cv::putText(vis_img, text, origin, font_face, font_scale,
+ cv::Scalar(255, 255, 255), thickness);
}
return vis_img;
}
-void ObjectDetector::Preprocess(const cv::Mat& ori_im) {
+void ObjectDetector::Preprocess(const cv::Mat &ori_im) {
// Clone the image : keep the original mat for postprocess
cv::Mat im = ori_im.clone();
cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
@@ -168,9 +155,8 @@ void ObjectDetector::Preprocess(const cv::Mat& ori_im) {
void ObjectDetector::Postprocess(
const std::vector mats,
- std::vector* result,
- std::vector bbox_num,
- std::vector output_data_,
+ std::vector *result,
+ std::vector bbox_num, std::vector output_data_,
bool is_rbox = false) {
result->clear();
int start_idx = 0;
@@ -226,12 +212,11 @@ void ObjectDetector::Postprocess(
}
void ObjectDetector::Predict(const std::vector imgs,
- const double threshold,
- const int warmup,
+ const double threshold, const int warmup,
const int repeats,
- std::vector* result,
- std::vector* bbox_num,
- std::vector* times) {
+ std::vector *result,
+ std::vector *bbox_num,
+ std::vector *times) {
auto preprocess_start = std::chrono::steady_clock::now();
int batch_size = imgs.size();
@@ -239,7 +224,7 @@ void ObjectDetector::Predict(const std::vector imgs,
std::vector in_data_all;
std::vector im_shape_all(batch_size * 2);
std::vector scale_factor_all(batch_size * 2);
- std::vector output_data_list_;
+ std::vector output_data_list_;
std::vector out_bbox_num_data_;
// in_net img for each batch
@@ -255,9 +240,8 @@ void ObjectDetector::Predict(const std::vector imgs,
scale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];
scale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];
- // TODO: reduce cost time
- in_data_all.insert(
- in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end());
+ in_data_all.insert(in_data_all.end(), inputs_.im_data_.begin(),
+ inputs_.im_data_.end());
// collect in_net img
in_net_img_all[bs_idx] = inputs_.in_net_im_;
@@ -276,10 +260,10 @@ void ObjectDetector::Predict(const std::vector imgs,
pad_img.convertTo(pad_img, CV_32FC3);
std::vector pad_data;
pad_data.resize(rc * rh * rw);
- float* base = pad_data.data();
+ float *base = pad_data.data();
for (int i = 0; i < rc; ++i) {
- cv::extractChannel(
- pad_img, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);
+ cv::extractChannel(pad_img,
+ cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);
}
in_data_all.insert(in_data_all.end(), pad_data.begin(), pad_data.end());
}
@@ -290,7 +274,7 @@ void ObjectDetector::Predict(const std::vector imgs,
auto preprocess_end = std::chrono::steady_clock::now();
// Prepare input tensor
auto input_names = predictor_->GetInputNames();
- for (const auto& tensor_name : input_names) {
+ for (const auto &tensor_name : input_names) {
auto in_tensor = predictor_->GetInputHandle(tensor_name);
if (tensor_name == "image") {
int rh = inputs_.in_net_shape_[0];
@@ -320,8 +304,8 @@ void ObjectDetector::Predict(const std::vector imgs,
for (int j = 0; j < output_names.size(); j++) {
auto output_tensor = predictor_->GetOutputHandle(output_names[j]);
std::vector output_shape = output_tensor->shape();
- int out_num = std::accumulate(
- output_shape.begin(), output_shape.end(), 1, std::multiplies());
+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
+ std::multiplies());
if (output_tensor->type() == paddle_infer::DataType::INT32) {
out_bbox_num_data_.resize(out_num);
output_tensor->CopyToCpu(out_bbox_num_data_.data());
@@ -344,8 +328,8 @@ void ObjectDetector::Predict(const std::vector imgs,
for (int j = 0; j < output_names.size(); j++) {
auto output_tensor = predictor_->GetOutputHandle(output_names[j]);
std::vector output_shape = output_tensor->shape();
- int out_num = std::accumulate(
- output_shape.begin(), output_shape.end(), 1, std::multiplies());
+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
+ std::multiplies());
output_shape_list.push_back(output_shape);
if (output_tensor->type() == paddle_infer::DataType::INT32) {
out_bbox_num_data_.resize(out_num);
@@ -371,22 +355,15 @@ void ObjectDetector::Predict(const std::vector imgs,
if (i == config_.fpn_stride_.size()) {
reg_max = output_shape_list[i][2] / 4 - 1;
}
- float* buffer = new float[out_tensor_list[i].size()];
- memcpy(buffer,
- &out_tensor_list[i][0],
+ float *buffer = new float[out_tensor_list[i].size()];
+ memcpy(buffer, &out_tensor_list[i][0],
out_tensor_list[i].size() * sizeof(float));
output_data_list_.push_back(buffer);
}
PaddleDetection::PicoDetPostProcess(
- result,
- output_data_list_,
- config_.fpn_stride_,
- inputs_.im_shape_,
- inputs_.scale_factor_,
- config_.nms_info_["score_threshold"].as(),
- config_.nms_info_["nms_threshold"].as(),
- num_class,
- reg_max);
+ result, output_data_list_, config_.fpn_stride_, inputs_.im_shape_,
+ inputs_.scale_factor_, config_.nms_info_["score_threshold"].as(),
+ config_.nms_info_["nms_threshold"].as(), num_class, reg_max);
bbox_num->push_back(result->size());
} else {
is_rbox = output_shape_list[0][output_shape_list[0].size() - 1] % 10 == 0;
diff --git a/deploy/pphuman/datacollector.py b/deploy/pphuman/datacollector.py
index cd459aad0680418bcd087d00662b0c310151ffc3..f1e3a21360fb871e26e53129cd8833cd123f1422 100644
--- a/deploy/pphuman/datacollector.py
+++ b/deploy/pphuman/datacollector.py
@@ -35,6 +35,9 @@ class Result(object):
return self.res_dict[name]
return None
+ def clear(self, name):
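+        # drop the cached results of one module (e.g. 'reid') so stale data
+        # from a previous frame is not reused downstream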
+ self.res_dict[name].clear()
+
class DataCollector(object):
"""
@@ -80,7 +83,6 @@ class DataCollector(object):
ids = int(mot_item[0])
if ids not in self.collector:
self.collector[ids] = copy.deepcopy(self.mots)
-
self.collector[ids]["frames"].append(frameid)
self.collector[ids]["rects"].append([mot_item[2:]])
if attr_res:
diff --git a/deploy/pphuman/mtmct.py b/deploy/pphuman/mtmct.py
index 30f84724809753b577503b3bb59d50a21731ddb1..5e0abbd9d0c7be69120cac04b3c5794d9bb9c436 100644
--- a/deploy/pphuman/mtmct.py
+++ b/deploy/pphuman/mtmct.py
@@ -297,10 +297,9 @@ def distill_idfeat(mot_res):
feature_new = feature_list
-    #if available frames number is more than 200, take one frame data per 20 frames
+    # if more than 20 frames are available, take one frame out of every two
- if len(qualities_new) > 200:
- skipf = 20
- else:
- skipf = max(10, len(qualities_new) // 10)
+ skipf = 1
+ if len(qualities_new) > 20:
+ skipf = 2
quality_skip = np.array(qualities_new[::skipf])
feature_skip = np.array(feature_new[::skipf])
diff --git a/deploy/pphuman/pipeline.py b/deploy/pphuman/pipeline.py
index 4d6fa014ae783b61c4464b2e292c5d745a5297d1..9e23e0c0f8e34e963a1cf2597318bff527f991c3 100644
--- a/deploy/pphuman/pipeline.py
+++ b/deploy/pphuman/pipeline.py
@@ -587,7 +587,7 @@ class PipePredictor(object):
if self.cfg['visual']:
self.action_visual_helper.update(action_res)
- if self.with_mtmct:
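+        # run MTMCT ReID feature extraction only once every 10 frames to cut cost;
+        # on the other frames the stale 'reid' result is cleared instead (see below)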
+ if self.with_mtmct and frame_id % 10 == 0:
crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot(
frame, mot_res)
if frame_id > self.warmup_frame:
@@ -603,6 +603,8 @@ class PipePredictor(object):
"rects": rects
}
self.pipeline_res.update(reid_res_dict, 'reid')
+ else:
+ self.pipeline_res.clear('reid')
self.collector.append(frame_id, self.pipeline_res)
diff --git a/deploy/pptracking/python/mot/matching/jde_matching.py b/deploy/pptracking/python/mot/matching/jde_matching.py
index eb3749885b0ad8e563e32cf3ca1b89c3364700bc..308b78c64babc6151f94d2b374c8bc092a29dc03 100644
--- a/deploy/pptracking/python/mot/matching/jde_matching.py
+++ b/deploy/pptracking/python/mot/matching/jde_matching.py
@@ -26,7 +26,7 @@ warnings.filterwarnings("ignore")
__all__ = [
'merge_matches',
'linear_assignment',
- 'cython_bbox_ious',
+ 'bbox_ious',
'iou_distance',
'embedding_distance',
'fuse_motion',
@@ -68,22 +68,28 @@ def linear_assignment(cost_matrix, thresh):
return matches, unmatched_a, unmatched_b
-def cython_bbox_ious(atlbrs, btlbrs):
- ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
- if ious.size == 0:
+def bbox_ious(atlbrs, btlbrs):
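+    # NumPy re-implementation of cython_bbox.bbox_overlaps: pairwise IoU between
+    # two lists of [x1, y1, x2, y2] boxes. The +1 terms keep the inclusive-pixel
+    # convention of the original cython_bbox implementation.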
+ boxes = np.ascontiguousarray(atlbrs, dtype=np.float)
+ query_boxes = np.ascontiguousarray(btlbrs, dtype=np.float)
+ N = boxes.shape[0]
+ K = query_boxes.shape[0]
+ ious = np.zeros((N, K), dtype=boxes.dtype)
+ if N * K == 0:
return ious
- try:
- import cython_bbox
- except Exception as e:
- print('cython_bbox not found, please install cython_bbox.'
- 'for example: `pip install cython_bbox`.')
- exit()
-
- ious = cython_bbox.bbox_overlaps(
- np.ascontiguousarray(
- atlbrs, dtype=np.float),
- np.ascontiguousarray(
- btlbrs, dtype=np.float))
+
+ for k in range(K):
+ box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + 1) *
+ (query_boxes[k, 3] - query_boxes[k, 1] + 1))
+ for n in range(N):
+ iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
+ boxes[n, 0], query_boxes[k, 0]) + 1)
+ if iw > 0:
+ ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
+ boxes[n, 1], query_boxes[k, 1]) + 1)
+ if ih > 0:
+ ua = float((boxes[n, 2] - boxes[n, 0] + 1) * (boxes[
+ n, 3] - boxes[n, 1] + 1) + box_area - iw * ih)
+ ious[n, k] = iw * ih / ua
return ious
@@ -98,7 +104,7 @@ def iou_distance(atracks, btracks):
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
- _ious = cython_bbox_ious(atlbrs, btlbrs)
+ _ious = bbox_ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
diff --git a/deploy/python/infer.py b/deploy/python/infer.py
index 84c643935f3d3b20acd910b0fa7412b46e7d1b72..3296e16e5a9612ba71d862d6843d9b9f576be1ff 100644
--- a/deploy/python/infer.py
+++ b/deploy/python/infer.py
@@ -231,7 +231,7 @@ class Detector(object):
self.det_times.preprocess_time_s.end()
# model prediction
- result = self.predict(repeats=repeats) # warmup
+ result = self.predict(repeats=50) # warmup
self.det_times.inference_time_s.start()
result = self.predict(repeats=repeats)
self.det_times.inference_time_s.end(repeats=repeats)
@@ -296,7 +296,7 @@ class Detector(object):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
out_path = os.path.join(self.output_dir, video_out_name)
- fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
index = 1
while (1):
@@ -790,7 +790,7 @@ def main():
if FLAGS.image_dir is None and FLAGS.image_file is not None:
assert FLAGS.batch_size == 1, "batch_size should be 1, when image_file is not None"
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
- detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)
+ detector.predict_image(img_list, FLAGS.run_benchmark, repeats=100)
if not FLAGS.run_benchmark:
detector.det_times.info(average=True)
else:
diff --git a/ppdet/metrics/mcmot_metrics.py b/ppdet/metrics/mcmot_metrics.py
index 48d15c90e6512eb8943ca3ee224ac92b2795453c..c38cbda0ef4a6bc0ed13b4e918e8eb84de912b6b 100644
--- a/ppdet/metrics/mcmot_metrics.py
+++ b/ppdet/metrics/mcmot_metrics.py
@@ -306,11 +306,12 @@ class MCMOTEvaluator(object):
def load_annotations(self):
assert self.data_type == 'mcmot'
- self.gt_filename = os.path.join(self.data_root, '../',
- 'sequences',
+ self.gt_filename = os.path.join(self.data_root, '../', 'sequences',
'{}.txt'.format(self.seq_name))
if not os.path.exists(self.gt_filename):
- logger.warning("gt_filename '{}' of MCMOTEvaluator is not exist, so the MOTA will be -inf.")
+            logger.warning(
+                "gt_filename '{}' of MCMOTEvaluator does not exist, so the MOTA will be -INF."
+                .format(self.gt_filename))
def reset_accumulator(self):
import motmetrics as mm
diff --git a/ppdet/metrics/mot_metrics.py b/ppdet/metrics/mot_metrics.py
index af2f7dd19c801cfe2c34d86c6c77ed4816b6fbec..117525df2094b9f55bfbf5f48390ca95bf54799c 100644
--- a/ppdet/metrics/mot_metrics.py
+++ b/ppdet/metrics/mot_metrics.py
@@ -37,8 +37,11 @@ __all__ = ['MOTEvaluator', 'MOTMetric', 'JDEDetMetric', 'KITTIMOTMetric']
def read_mot_results(filename, is_gt=False, is_ignore=False):
valid_label = [1]
ignore_labels = [2, 7, 8, 12] # only in motchallenge datasets like 'MOT16'
- logger.info("In MOT16/17 dataset the valid_label of ground truth is '{}', "
- "in other dataset it should be '0' for single classs MOT.".format(valid_label[0]))
+ if is_gt:
+ logger.info(
+            "In MOT16/17 datasets the valid_label of ground truth is '{}', "
+            "in other datasets it should be '0' for single class MOT.".format(
+ valid_label[0]))
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
@@ -118,7 +121,9 @@ class MOTEvaluator(object):
gt_filename = os.path.join(self.data_root, self.seq_name, 'gt',
'gt.txt')
if not os.path.exists(gt_filename):
- logger.warning("gt_filename '{}' of MOTEvaluator is not exist, so the MOTA will be -inf.")
+            logger.warning(
+                "gt_filename '{}' of MOTEvaluator does not exist, so the MOTA will be -INF."
+                .format(gt_filename))
self.gt_frame_dict = read_mot_results(gt_filename, is_gt=True)
self.gt_ignore_frame_dict = read_mot_results(
gt_filename, is_ignore=True)
diff --git a/ppdet/modeling/architectures/meta_arch.py b/ppdet/modeling/architectures/meta_arch.py
index 1f13c854072956395e8bb9bbb5b9ad9d43d2eeec..4ff84a97a61739e06f215f56a64daf0459e4a971 100644
--- a/ppdet/modeling/architectures/meta_arch.py
+++ b/ppdet/modeling/architectures/meta_arch.py
@@ -22,22 +22,23 @@ class BaseArch(nn.Layer):
self.fuse_norm = False
def load_meanstd(self, cfg_transform):
- self.scale = 1.
- self.mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape(
- (1, 3, 1, 1))
- self.std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))
+ scale = 1.
+ mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
+ std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
for item in cfg_transform:
if 'NormalizeImage' in item:
- self.mean = paddle.to_tensor(item['NormalizeImage'][
- 'mean']).reshape((1, 3, 1, 1))
- self.std = paddle.to_tensor(item['NormalizeImage'][
- 'std']).reshape((1, 3, 1, 1))
+ mean = np.array(
+ item['NormalizeImage']['mean'], dtype=np.float32)
+ std = np.array(item['NormalizeImage']['std'], dtype=np.float32)
if item['NormalizeImage'].get('is_scale', True):
- self.scale = 1. / 255.
+ scale = 1. / 255.
break
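+        # fold mean/std into scale and bias so normalization in forward()
+        # becomes a single multiply-add: (x * s - mean) / std = x * scale + bias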
if self.data_format == 'NHWC':
- self.mean = self.mean.reshape(1, 1, 1, 3)
- self.std = self.std.reshape(1, 1, 1, 3)
+ self.scale = paddle.to_tensor(scale / std).reshape((1, 1, 1, 3))
+ self.bias = paddle.to_tensor(-mean / std).reshape((1, 1, 1, 3))
+ else:
+ self.scale = paddle.to_tensor(scale / std).reshape((1, 3, 1, 1))
+ self.bias = paddle.to_tensor(-mean / std).reshape((1, 3, 1, 1))
def forward(self, inputs):
if self.data_format == 'NHWC':
@@ -46,7 +47,7 @@ class BaseArch(nn.Layer):
if self.fuse_norm:
image = inputs['image']
- self.inputs['image'] = (image * self.scale - self.mean) / self.std
+ self.inputs['image'] = image * self.scale + self.bias
self.inputs['im_shape'] = inputs['im_shape']
self.inputs['scale_factor'] = inputs['scale_factor']
else:
@@ -66,8 +67,7 @@ class BaseArch(nn.Layer):
outs = []
for inp in inputs_list:
if self.fuse_norm:
- self.inputs['image'] = (
- inp['image'] * self.scale - self.mean) / self.std
+ self.inputs['image'] = inp['image'] * self.scale + self.bias
self.inputs['im_shape'] = inp['im_shape']
self.inputs['scale_factor'] = inp['scale_factor']
else:
@@ -75,7 +75,7 @@ class BaseArch(nn.Layer):
outs.append(self.get_pred())
# multi-scale test
- if len(outs)>1:
+ if len(outs) > 1:
out = self.merge_multi_scale_predictions(outs)
else:
out = outs[0]
@@ -92,7 +92,9 @@ class BaseArch(nn.Layer):
keep_top_k = self.bbox_post_process.nms.keep_top_k
nms_threshold = self.bbox_post_process.nms.nms_threshold
else:
- raise Exception("Multi scale test only supports CascadeRCNN, FasterRCNN and MaskRCNN for now")
+ raise Exception(
+ "Multi scale test only supports CascadeRCNN, FasterRCNN and MaskRCNN for now"
+ )
final_boxes = []
all_scale_outs = paddle.concat([o['bbox'] for o in outs]).numpy()
@@ -101,9 +103,11 @@ class BaseArch(nn.Layer):
if np.count_nonzero(idxs) == 0:
continue
r = nms(all_scale_outs[idxs, 1:], nms_threshold)
- final_boxes.append(np.concatenate([np.full((r.shape[0], 1), c), r], 1))
+ final_boxes.append(
+ np.concatenate([np.full((r.shape[0], 1), c), r], 1))
out = np.concatenate(final_boxes)
- out = np.concatenate(sorted(out, key=lambda e: e[1])[-keep_top_k:]).reshape((-1, 6))
+ out = np.concatenate(sorted(
+ out, key=lambda e: e[1])[-keep_top_k:]).reshape((-1, 6))
out = {
'bbox': paddle.to_tensor(out),
'bbox_num': paddle.to_tensor(np.array([out.shape[0], ]))
diff --git a/ppdet/modeling/assigners/atss_assigner.py b/ppdet/modeling/assigners/atss_assigner.py
index aba857e3d88145151e2246681c2ba673675efde1..e9543c638a456ba7416d1404fdf5c606a94b2d34 100644
--- a/ppdet/modeling/assigners/atss_assigner.py
+++ b/ppdet/modeling/assigners/atss_assigner.py
@@ -199,7 +199,11 @@ class ATSSAssigner(nn.Layer):
gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)
assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])
- assigned_scores = F.one_hot(assigned_labels, self.num_classes)
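+        # background anchors carry label bg_index, so one-hot over
+        # num_classes + 1 columns and then drop the bg_index column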
+ assigned_scores = F.one_hot(assigned_labels, self.num_classes + 1)
+ ind = list(range(self.num_classes + 1))
+ ind.remove(bg_index)
+ assigned_scores = paddle.index_select(
+ assigned_scores, paddle.to_tensor(ind), axis=-1)
if pred_bboxes is not None:
# assigned iou
ious = batch_iou_similarity(gt_bboxes, pred_bboxes) * mask_positive
diff --git a/ppdet/modeling/assigners/task_aligned_assigner.py b/ppdet/modeling/assigners/task_aligned_assigner.py
index b1f47e786df0261d3925d1b5bc776683657385c1..cb0cf1db0fd952ade87738df93c06be878216856 100644
--- a/ppdet/modeling/assigners/task_aligned_assigner.py
+++ b/ppdet/modeling/assigners/task_aligned_assigner.py
@@ -143,7 +143,11 @@ class TaskAlignedAssigner(nn.Layer):
gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)
assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])
- assigned_scores = F.one_hot(assigned_labels, num_classes)
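+        # background anchors carry label bg_index, so one-hot over
+        # num_classes + 1 columns and then drop the bg_index column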
+ assigned_scores = F.one_hot(assigned_labels, num_classes + 1)
+ ind = list(range(num_classes + 1))
+ ind.remove(bg_index)
+ assigned_scores = paddle.index_select(
+ assigned_scores, paddle.to_tensor(ind), axis=-1)
# rescale alignment metrics
alignment_metrics *= mask_positive
max_metrics_per_instance = alignment_metrics.max(axis=-1, keepdim=True)
diff --git a/ppdet/modeling/heads/ppyoloe_head.py b/ppdet/modeling/heads/ppyoloe_head.py
index 920bb2298909e5275c9bc04f3c73cce3f4c8ff36..97b96259dcb3fbb4cce9d590b01071b17f364b30 100644
--- a/ppdet/modeling/heads/ppyoloe_head.py
+++ b/ppdet/modeling/heads/ppyoloe_head.py
@@ -331,7 +331,8 @@ class PPYOLOEHead(nn.Layer):
assigned_bboxes /= stride_tensor
# cls loss
if self.use_varifocal_loss:
- one_hot_label = F.one_hot(assigned_labels, self.num_classes)
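+            # background maps to the extra last column, dropped by [..., :-1]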
+ one_hot_label = F.one_hot(assigned_labels,
+ self.num_classes + 1)[..., :-1]
loss_cls = self._varifocal_loss(pred_scores, assigned_scores,
one_hot_label)
else:
diff --git a/ppdet/modeling/losses/detr_loss.py b/ppdet/modeling/losses/detr_loss.py
index 5a589d4a2b4dae5644dc8b8ecf6f839c68559bdb..e22c5d8b101234e8b1032a540e8c98d290631f02 100644
--- a/ppdet/modeling/losses/detr_loss.py
+++ b/ppdet/modeling/losses/detr_loss.py
@@ -80,7 +80,7 @@ class DETRLoss(nn.Layer):
target_label = target_label.reshape([bs, num_query_objects])
if self.use_focal_loss:
target_label = F.one_hot(target_label,
- self.num_classes + 1)[:, :, :-1]
+ self.num_classes + 1)[..., :-1]
return {
'loss_class': self.loss_coeff['class'] * sigmoid_focal_loss(
logits, target_label, num_gts / num_query_objects)
diff --git a/ppdet/modeling/mot/matching/jde_matching.py b/ppdet/modeling/mot/matching/jde_matching.py
index e9c40dba4d3f2a82f8138229ff20b6d27cc1a0e5..308b78c64babc6151f94d2b374c8bc092a29dc03 100644
--- a/ppdet/modeling/mot/matching/jde_matching.py
+++ b/ppdet/modeling/mot/matching/jde_matching.py
@@ -26,7 +26,7 @@ warnings.filterwarnings("ignore")
__all__ = [
'merge_matches',
'linear_assignment',
- 'cython_bbox_ious',
+ 'bbox_ious',
'iou_distance',
'embedding_distance',
'fuse_motion',
@@ -68,22 +68,28 @@ def linear_assignment(cost_matrix, thresh):
return matches, unmatched_a, unmatched_b
-def cython_bbox_ious(atlbrs, btlbrs):
- ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
- if ious.size == 0:
+def bbox_ious(atlbrs, btlbrs):
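+    # plain NumPy replacement for cython_bbox.bbox_overlaps: pairwise IoU
+    # between boxes in atlbrs and btlbrs, both as [x1, y1, x2, y2]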
+    boxes = np.ascontiguousarray(atlbrs, dtype=np.float64)
+    query_boxes = np.ascontiguousarray(btlbrs, dtype=np.float64)
+ N = boxes.shape[0]
+ K = query_boxes.shape[0]
+ ious = np.zeros((N, K), dtype=boxes.dtype)
+ if N * K == 0:
return ious
- try:
- import cython_bbox
- except Exception as e:
- print('cython_bbox not found, please install cython_bbox.'
- 'for example: `pip install cython_bbox`.')
- raise e
-
- ious = cython_bbox.bbox_overlaps(
- np.ascontiguousarray(
- atlbrs, dtype=np.float),
- np.ascontiguousarray(
- btlbrs, dtype=np.float))
+
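+    # +1 keeps the inclusive-pixel convention used by cython_bbox.bbox_overlaps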
+ for k in range(K):
+ box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + 1) *
+ (query_boxes[k, 3] - query_boxes[k, 1] + 1))
+ for n in range(N):
+ iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
+ boxes[n, 0], query_boxes[k, 0]) + 1)
+ if iw > 0:
+ ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
+ boxes[n, 1], query_boxes[k, 1]) + 1)
+ if ih > 0:
+ ua = float((boxes[n, 2] - boxes[n, 0] + 1) * (boxes[
+ n, 3] - boxes[n, 1] + 1) + box_area - iw * ih)
+ ious[n, k] = iw * ih / ua
return ious
@@ -98,7 +104,7 @@ def iou_distance(atracks, btracks):
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
- _ious = cython_bbox_ious(atlbrs, btlbrs)
+ _ious = bbox_ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
diff --git a/requirements.txt b/requirements.txt
index 91c79fc0f396546bb86f26abbaebd4a503d2ebbe..e4009c30fc54ef29926bdee2dd798076947fa119 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,4 +14,3 @@ lap
sklearn
motmetrics
openpyxl
-cython_bbox