diff --git a/configs/datasets/coco_detection.yml b/configs/datasets/coco_detection.yml index 7a62c3b0b57a5d76c8ed519d3a3940c1b4532c15..614135743712b195a8d3822609d9a3c28ee60d22 100644 --- a/configs/datasets/coco_detection.yml +++ b/configs/datasets/coco_detection.yml @@ -17,3 +17,4 @@ EvalDataset: TestDataset: !ImageFolder anno_path: annotations/instances_val2017.json + dataset_dir: dataset/coco diff --git a/configs/datasets/coco_instance.yml b/configs/datasets/coco_instance.yml index 5eaf76791a94bfd2819ba6dab610fae54b69f26e..5b074b00d73ced75247b6ad56604e7c90b705c1a 100644 --- a/configs/datasets/coco_instance.yml +++ b/configs/datasets/coco_instance.yml @@ -17,3 +17,4 @@ EvalDataset: TestDataset: !ImageFolder anno_path: annotations/instances_val2017.json + dataset_dir: dataset/coco diff --git a/configs/datasets/dota.yml b/configs/datasets/dota.yml index f9d9395b00d7ed9028396044c407784d251e43e5..5153163d95a8a418a82d3d6d43f6e1f9404ed075 100644 --- a/configs/datasets/dota.yml +++ b/configs/datasets/dota.yml @@ -17,3 +17,4 @@ EvalDataset: TestDataset: !ImageFolder anno_path: trainval_split/s2anet_trainval_paddle_coco.json + dataset_dir: dataset/DOTA_1024_s2anet/ diff --git a/configs/mot/bytetrack/_base_/mot17.yml b/configs/mot/bytetrack/_base_/mot17.yml index 2efa55546026168c39396c4d51a71428e19a0638..faf47f622d1c2847a9686dfa8d7e48a49c05436c 100644 --- a/configs/mot/bytetrack/_base_/mot17.yml +++ b/configs/mot/bytetrack/_base_/mot17.yml @@ -17,6 +17,7 @@ EvalDataset: TestDataset: !ImageFolder + dataset_dir: dataset/mot/MOT17 anno_path: annotations/val_half.json diff --git a/configs/mot/deepsort/_base_/mot17.yml b/configs/mot/deepsort/_base_/mot17.yml index 2efa55546026168c39396c4d51a71428e19a0638..faf47f622d1c2847a9686dfa8d7e48a49c05436c 100644 --- a/configs/mot/deepsort/_base_/mot17.yml +++ b/configs/mot/deepsort/_base_/mot17.yml @@ -17,6 +17,7 @@ EvalDataset: TestDataset: !ImageFolder + dataset_dir: dataset/mot/MOT17 anno_path: annotations/val_half.json diff --git a/configs/picodet/README.md b/configs/picodet/README.md index a226cc9a95e91b3e28635023996e201eed08e089..46167b5d7631aa5167ea936cba4313fe1ea20148 100644 --- a/configs/picodet/README.md +++ b/configs/picodet/README.md @@ -1,60 +1,63 @@ -English | [简体中文](README_cn.md) +简体中文 | [English](README_en.md) # PP-PicoDet ![](../../docs/images/picedet_demo.jpeg) -## News +## 最新动态 -- Released a new series of PP-PicoDet models: **(2022.03.20)** - - (1) It was used TAL/Task-aligned-Head and optimized PAN, which greatly improved the accuracy; - - (2) Moreover optimized CPU prediction speed, and the training speed is greatly improved; - - (3) The export model includes post-processing, and the prediction directly outputs the result, without secondary development, and the migration cost is lower. +- 发布全新系列PP-PicoDet模型:**(2022.03.20)** + - (1)引入TAL及Task-aligned Head,优化PAN等结构,精度大幅提升; + - (2)优化CPU端预测速度,同时训练速度大幅提升; + - (3)导出模型将后处理包含在网络中,预测直接输出box结果,无需二次开发,迁移成本更低。 -### Legacy Model +## 历史版本模型 -- Please refer to: [PicoDet 2021.10版本](./legacy_model/) +- 详情请参考:[PicoDet 2021.10版本](./legacy_model/) -## Introduction +## 简介 -We developed a series of lightweight models, named `PP-PicoDet`. Because of the excellent performance, our models are very suitable for deployment on mobile or CPU. For more details, please refer to our [report on arXiv](https://arxiv.org/abs/2111.00902). 
+PaddleDetection中提出了全新的轻量级系列模型`PP-PicoDet`,在移动端具有卓越的性能,成为全新SOTA轻量级模型。详细的技术细节可以参考我们的[arXiv技术报告](https://arxiv.org/abs/2111.00902)。 -- 🌟 Higher mAP: the **first** object detectors that surpass mAP(0.5:0.95) **30+** within 1M parameters when the input size is 416. -- 🚀 Faster latency: 150FPS on mobile ARM CPU. -- 😊 Deploy friendly: support PaddleLite/MNN/NCNN/OpenVINO and provide C++/Python/Android implementation. -- 😍 Advanced algorithm: use the most advanced algorithms and offer innovation, such as ESNet, CSP-PAN, SimOTA with VFL, etc. +PP-PicoDet模型有如下特点: + +- 🌟 更高的mAP: 第一个在1M参数量之内`mAP(0.5:0.95)`超越**30+**(输入416像素时)。 +- 🚀 更快的预测速度: 网络预测在ARM CPU下可达150FPS。 +- 😊 部署友好: 支持PaddleLite/MNN/NCNN/OpenVINO等预测库,支持转出ONNX,提供了C++/Python/Android的demo。 +- 😍 先进的算法: 我们在现有SOTA算法中进行了创新, 包括:ESNet, CSP-PAN, SimOTA等等。
-## Benchmark +## 基线 -| Model | Input size | mAPval
0.5:0.95 | mAPval
0.5 | Params
(M) | FLOPS
(G) | Latency[CPU](#latency)
(ms) | Latency[Lite](#latency)
(ms) | Download | Config | +| 模型 | 输入尺寸 | mAPval
0.5:0.95 | mAPval
0.5 | 参数量
(M) | FLOPS
(G) | 预测时延[CPU](#latency)
(ms) | 预测时延[Lite](#latency)
(ms) | 下载 | 配置文件 | | :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: | :----------------------------------------: | :--------------------------------------- | -| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 10.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_320_coco_lcnet.yml) | -| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 15.4ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_416_coco_lcnet.yml) | -| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 12.6ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco_lcnet.yml) | -| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20 | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) | -| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 14.5ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco_lcnet.yml) | -| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 19.5ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco_lcnet.yml) | -| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 18.3ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco_lcnet.yml) | -| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 22.1ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco_lcnet.yml) | -| PicoDet-L | 640*640 | 42.3 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) | +| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 10.9ms |
7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_xs_320_coco_lcnet.yml) | +| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 15.4ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_xs_416_coco_lcnet.yml) | +| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 12.6ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_s_320_coco_lcnet.yml) | +| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_s_416_coco_lcnet.yml) | +| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 14.5ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_m_320_coco_lcnet.yml) | +| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 19.5ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_m_416_coco_lcnet.yml) | +| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 18.3ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_l_320_coco_lcnet.yml) | +| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 22.1ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_l_416_coco_lcnet.yml) | +| PicoDet-L | 640*640 | 42.6 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_l_640_coco_lcnet.yml) | +
-Table Notes: +注意事项: -- Latency: All our models test on `Intel-Xeon-Gold-6148` CPU with MKLDNN by 10 threads and `Qualcomm Snapdragon 865(4xA77+4xA55)` with 4 threads by arm8 and with FP16. In the above table, test CPU latency on Paddle-Inference and testing Mobile latency with `Lite`->[Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite). -- PicoDet is trained on COCO train2017 dataset and evaluated on COCO val2017. And PicoDet used 4 GPUs for training and all checkpoints are trained with default settings and hyperparameters. -- Benchmark test: When testing the speed benchmark, the post-processing is not included in the exported model, you need to set `-o export.benchmark=True` or manually modify [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12). +- 时延测试: 我们所有的模型都在英特尔至强6148的CPU(MKLDNN 10线程)和`骁龙865(4xA77+4xA55)`的ARM CPU上测试(4线程,FP16预测)。上面表格中标有`CPU`的是使用Paddle Inference库测试,标有`Lite`的是使用[Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite)进行测试。 +- PicoDet在COCO train2017上训练,并且在COCO val2017上进行验证。使用4卡GPU训练,并且上表所有的预训练模型都是通过发布的默认配置训练得到。 +- Benchmark测试:测试速度benchmark性能时,导出模型后处理不包含在网络中,需要设置`-o export.benchmark=True` 或手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/runtime.yml#L12)。
-#### Benchmark of Other Models +#### 其他模型的基线 -| Model | Input size | mAPval
0.5:0.95 | mAPval
0.5 | Params
(M) | FLOPS
(G) | Latency[NCNN](#latency)
(ms) | +| 模型 | 输入尺寸 | mAPval
0.5:0.95 | mAPval
0.5 | 参数量
(M) | FLOPS
(G) | 预测时延[NCNN](#latency)
(ms) | | :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | | YOLOv3-Tiny | 416*416 | 16.6 | 33.1 | 8.86 | 5.62 | 25.42 | | YOLOv4-Tiny | 416*416 | 21.7 | 40.2 | 6.06 | 6.96 | 23.69 | @@ -68,38 +71,39 @@ We developed a series of lightweight models, named `PP-PicoDet`. Because of the | YOLOv5n | 640*640 | 28.4 | 46.0 | 1.9 | 4.5 | 40.35 | | YOLOv5s | 640*640 | 37.2 | 56.0 | 7.2 | 16.5 | 78.05 | -- Testing Mobile latency with code: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark). +- ARM测试的benchmark脚本来自: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark)。 -## Quick Start +## 快速开始
-Requirements: +依赖包: -- PaddlePaddle >= 2.2.1 +- PaddlePaddle == 2.2.2
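A quick way to confirm the pinned requirement above is met — a minimal sketch using PaddlePaddle's own version string and built-in install check:

```python
# Verify that the required PaddlePaddle version is installed and functional.
import paddle

print(paddle.__version__)   # expected to print 2.2.2 for this release branch
paddle.utils.run_check()    # PaddlePaddle's installation self-check (CPU/GPU)
```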
-Installation +安装 -- [Installation guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md) -- [Prepare dataset](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/PrepareDataSet_en.md) +- [安装指导文档](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/docs/tutorials/INSTALL.md) +- [准备数据文档](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/docs/tutorials/PrepareDataSet_en.md)
-Training and Evaluation +训练&评估 -- Training model on single-GPU: +- 单卡GPU上训练: ```shell # training on single-GPU export CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval ``` -If the GPU is out of memory during training, reduce the batch_size in TrainReader, and reduce the base_lr in LearningRate proportionally. -- Training model on multi-GPU: +**注意:**如果训练时显存不足(out of memory),将TrainReader中batch_size调小,同时LearningRate中base_lr等比例减小。同时我们发布的config均由4卡训练得到,如果改变GPU卡数为1,那么base_lr需要减小4倍(换算示例见下方代码)。 + +- 多卡GPU上训练: ```shell @@ -108,31 +112,31 @@ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval ``` -- Evaluation: +- 评估: ```shell python tools/eval.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ -o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams ``` -- Infer: +- 测试: ```shell python tools/infer.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ -o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams ``` -Detail also can refer to [Quick start guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md). +详情请参考[快速开始文档](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/docs/tutorials/GETTING_STARTED.md)。 
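Following up on the base_lr note above: the released configs assume 4 GPUs, so the learning rate follows a linear scaling rule when the GPU count changes. A small sketch of the arithmetic — the `base_lr` value here is a hypothetical placeholder, read the real value from `LearningRate.base_lr` in your config:

```python
# Linear LR scaling when deviating from the released 4-GPU training setup.
DEFAULT_GPUS = 4
base_lr = 0.32  # hypothetical example; check LearningRate.base_lr in the yml

def scaled_lr(num_gpus, lr=base_lr):
    """Scale the learning rate proportionally to the number of GPUs."""
    return lr * num_gpus / DEFAULT_GPUS

print(scaled_lr(1))  # 0.08 -> base_lr reduced 4x for single-GPU training
print(scaled_lr(8))  # 0.64 -> doubled when training on 8 GPUs
```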
-## Deployment +## 部署 -### Export and Convert Model +### 导出及转换模型
-1. Export model (click to expand) +1. 导出模型 (点击展开) ```shell cd PaddleDetection @@ -141,18 +145,21 @@ python tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ --output_dir=inference_model ``` +- 如无需导出后处理,请指定:`-o export.benchmark=True`(如果-o已出现过,此处删掉-o)或者手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/runtime.yml) 中相应字段。 +- 如无需导出NMS,请指定:`-o export.nms=False`或者手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/runtime.yml) 中相应字段。 +
-2. Convert to PaddleLite (click to expand) +2. 转换模型至Paddle Lite (点击展开) -- Install Paddlelite>=2.10: +- 安装Paddlelite>=2.10: ```shell pip install paddlelite ``` -- Convert model: +- 转换模型至Paddle Lite格式: ```shell # FP32 @@ -164,16 +171,16 @@ paddle_lite_opt --model_dir=inference_model/picodet_s_320_coco_lcnet --valid_tar
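Besides the `paddle_lite_opt` CLI shown above, Paddle Lite also exposes the converter as a Python class. A hedged sketch — it assumes the `Opt` API of paddlelite>=2.10 and the default `model.pdmodel`/`model.pdiparams` file names produced by `tools/export_model.py`:

```python
# Convert the exported inference model to a Paddle Lite .nb file in Python.
from paddlelite.lite import Opt

opt = Opt()
opt.set_model_file("inference_model/picodet_s_320_coco_lcnet/model.pdmodel")
opt.set_param_file("inference_model/picodet_s_320_coco_lcnet/model.pdiparams")
opt.set_valid_places("arm")          # target ARM CPU, matching the benchmark
opt.set_model_type("naive_buffer")   # produce the .nb format used by the demos
opt.set_optimize_out("picodet_s_320_coco_lcnet_opt")  # output file prefix
opt.run()
```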
-3. Convert to ONNX (click to expand) +3. 转换模型至ONNX (点击展开) -- Install [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 and ONNX > 1.10.1, for details, please refer to [Tutorials of Export ONNX Model](../../deploy/EXPORT_ONNX_MODEL.md) +- 安装[Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 并且 ONNX > 1.10.1, 细节请参考[导出ONNX模型教程](../../deploy/EXPORT_ONNX_MODEL.md) ```shell pip install onnx -pip install paddle2onnx +pip install paddle2onnx==0.9.2 ``` -- Convert model: +- 转换模型: ```shell paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \ @@ -183,22 +190,22 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \ --save_file picodet_s_320_coco.onnx ``` -- Simplify ONNX model: use onnx-simplifier to simplify onnx model. +- 简化ONNX模型: 使用`onnx-simplifier`库来简化ONNX模型。 - - Install onnx-simplifier >= 0.3.6: + - 安装 onnx-simplifier >= 0.3.6: ```shell pip install onnx-simplifier ``` - - simplify onnx model: + - 简化ONNX模型: ```shell python -m onnxsim picodet_s_320_coco.onnx picodet_s_processed.onnx ```
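After simplification, the ONNX model can be smoke-tested with ONNX Runtime. A hedged sketch — the input names `image`/`scale_factor` and the 320x320 shape are assumptions based on the export settings above; print `get_inputs()` first to confirm them:

```python
# Minimal ONNX Runtime smoke test for the simplified PicoDet model.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("picodet_s_processed.onnx",
                            providers=["CPUExecutionProvider"])
print([(i.name, i.shape) for i in sess.get_inputs()])  # verify assumed inputs

image = np.random.rand(1, 3, 320, 320).astype(np.float32)  # dummy input
scale_factor = np.ones((1, 2), dtype=np.float32)
outputs = sess.run(None, {"image": image, "scale_factor": scale_factor})
print([o.shape for o in outputs])
```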
-- Deploy models +- 部署用的模型 -| Model | Input size | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) | +| 模型 | 输入尺寸 | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) | | :-------- | :--------: | :---------------------: | :----------------: | :----------------: | | PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) | | PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) | @@ -212,31 +219,28 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \ | PicoDet-LCNet 1.5x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_lcnet_1_5x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x_fp16.tar) | -### Deploy +### 部署 - PaddleInference demo [Python](../../deploy/python) & [C++](../../deploy/cpp) - [PaddleLite C++ demo](../../deploy/lite) -- [NCNN C++/Python demo](../../deploy/third_engine/demo_ncnn) -- [MNN C++/Python demo](../../deploy/third_engine/demo_mnn) -- [OpenVINO C++ demo](../../deploy/third_engine/demo_openvino) -- [Android demo(Paddle Lite)](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/android/app/cxx/picodet_detection_demo) +- [Android demo(Paddle Lite)](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/release/2.4/object_detection/android/app/cxx/picodet_detection_demo) -Android demo visualization: +Android demo可视化:
-## Quantization +## 量化
-Requirements: +依赖包: - PaddlePaddle >= 2.2.2 - PaddleSlim >= 2.2.1 -**Install:** +**安装:** ```shell pip install paddleslim==2.2.1 @@ -245,61 +249,61 @@ pip install paddleslim==2.2.1
-Quant aware (click to expand) +量化训练 (点击展开) -Configure the quant config and start training: +开始量化训练: ```shell python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ --slim_config configs/slim/quant/picodet_s_quant.yml --eval ``` -- More detail can refer to [slim document](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim) +- 更多细节请参考[slim文档](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/slim)
-Post quant (click to expand) +离线量化 (点击展开) -Configure the post quant config and start calibrate model: +校准及导出量化模型: ```shell python tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ --slim_config configs/slim/post_quant/picodet_s_ptq.yml ``` -- Notes: Now the accuracy of post quant is abnormal and this problem is being solved. +- 注意: 离线量化模型精度问题正在解决中.
-## Unstructured Pruning +## 非结构化剪枝
-Toturial: +教程: -Please refer this [documentation](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/pruner/README.md) for details such as requirements, training and deployment. +训练及部署细节请参考[非结构化剪枝文档](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/pruner/README.md)。
-## Application +## 应用 -- **Pedestrian detection:** model zoo of `PicoDet-S-Pedestrian` please refer to [PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B) +- **行人检测:** `PicoDet-S-Pedestrian`行人检测模型请参考[PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B) -- **Mainbody detection:** model zoo of `PicoDet-L-Mainbody` please refer to [mainbody detection](./application/mainbody_detection/README.md) +- **主体检测:** `PicoDet-L-Mainbody`主体检测模型请参考[主体检测文档](./application/mainbody_detection/README.md) ## FAQ
-Out of memory error. +显存爆炸(Out of memory error) -Please reduce the `batch_size` of `TrainReader` in config. +请减小配置文件中`TrainReader`的`batch_size`。
-How to transfer learning. +如何迁移学习 -Please reset `pretrain_weights` in config, which trained on coco. Such as: +请重新设置配置文件中的`pretrain_weights`字段,比如利用COCO上训好的模型在自己的数据上继续训练: ```yaml pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams ``` @@ -307,17 +311,17 @@ pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcne
-The transpose operator is time-consuming on some hardware. +`transpose`算子在某些硬件上耗时较长 -Please use `PicoDet-LCNet` model, which has fewer `transpose` operators. +请使用`PicoDet-LCNet`模型,该模型的`transpose`算子较少。 
-How to count model parameters. +如何计算模型参数量。 -You can insert below code at [here](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) to count learnable parameters. +可以将以下代码插入:[trainer.py](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/ppdet/engine/trainer.py#L141) 来计算参数量。 ```python params = sum([ @@ -329,8 +333,8 @@ print('params: ', params)
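The fenced snippet above is cut off at the hunk boundary; for reference, a self-contained sketch of the same idea (a `paddle.nn.Layer` stands in for the model built by ppdet's Trainer; filtering `_mean`/`_variance` skips BatchNorm running statistics, as the FAQ snippet does):

```python
# Count learnable parameters of a paddle.nn.Layer, skipping BN statistics.
import paddle.nn as nn

def count_params(model: nn.Layer) -> int:
    return sum(
        int(p.numel()) for n, p in model.named_parameters()
        if all(x not in n for x in ['_mean', '_variance'])
    )

# toy usage; in PaddleDetection this would be the model built by the Trainer
toy = nn.Sequential(nn.Conv2D(3, 8, 3), nn.BatchNorm2D(8))
print('params: ', count_params(toy))
```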
-## Cite PP-PicoDet -If you use PicoDet in your research, please cite our work by using the following BibTeX entry: +## 引用PP-PicoDet +如果需要在你的研究中使用PP-PicoDet,请通过以下方式引用我们的技术报告: ``` @misc{yu2021pppicodet, title={PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices}, diff --git a/configs/picodet/README_cn.md b/configs/picodet/README_en.md similarity index 61% rename from configs/picodet/README_cn.md rename to configs/picodet/README_en.md index 7131200a2e106e50fe71a97eda566a4520bfc5e8..6b9be76c431f49cd62b32b84911cbac950431691 100644 --- a/configs/picodet/README_cn.md +++ b/configs/picodet/README_en.md @@ -1,63 +1,60 @@ -简体中文 | [English](README.md) +English | [简体中文](README.md) # PP-PicoDet ![](../../docs/images/picedet_demo.jpeg) -## 最新动态 +## News -- 发布全新系列PP-PicoDet模型:**(2022.03.20)** - - (1)引入TAL及Task-aligned Head,优化PAN等结构,精度大幅提升; - - (2)优化CPU端预测速度,同时训练速度大幅提升; - - (3)导出模型将后处理包含在网络中,预测直接输出box结果,无需二次开发,迁移成本更低。 +- Released a new series of PP-PicoDet models: **(2022.03.20)** + - (1) Used TAL/Task-aligned-Head and an optimized PAN, which greatly improved the accuracy; + - (2) Optimized CPU prediction speed, while training speed is also greatly improved; + - (3) The exported model includes post-processing, so the prediction directly outputs the result without secondary development, and the migration cost is lower. -## 历史版本模型 +### Legacy Model -- 详情请参考:[PicoDet 2021.10版本](./legacy_model/) +- Please refer to: [PicoDet 2021.10](./legacy_model/) -## 简介 +## Introduction -PaddleDetection中提出了全新的轻量级系列模型`PP-PicoDet`,在移动端具有卓越的性能,成为全新SOTA轻量级模型。详细的技术细节可以参考我们的[arXiv技术报告](https://arxiv.org/abs/2111.00902)。 +We developed a series of lightweight models, named `PP-PicoDet`. Because of the excellent performance, our models are very suitable for deployment on mobile or CPU. For more details, please refer to our [report on arXiv](https://arxiv.org/abs/2111.00902). -PP-PicoDet模型有如下特点: - -- 🌟 更高的mAP: 第一个在1M参数量之内`mAP(0.5:0.95)`超越**30+**(输入416像素时)。 -- 🚀 更快的预测速度: 网络预测在ARM CPU下可达150FPS。 -- 😊 部署友好: 支持PaddleLite/MNN/NCNN/OpenVINO等预测库,支持转出ONNX,提供了C++/Python/Android的demo。 -- 😍 先进的算法: 我们在现有SOTA算法中进行了创新, 包括:ESNet, CSP-PAN, SimOTA等等。 +- 🌟 Higher mAP: the **first** object detectors that surpass mAP(0.5:0.95) **30+** within 1M parameters when the input size is 416. +- 🚀 Faster latency: 150FPS on mobile ARM CPU. +- 😊 Deploy friendly: support PaddleLite/MNN/NCNN/OpenVINO and provide C++/Python/Android implementation. +- 😍 Advanced algorithm: use the most advanced algorithms and offer innovation, such as ESNet, CSP-PAN, SimOTA with VFL, etc. 
-## 基线 +## Benchmark -| 模型 | 输入尺寸 | mAPval
0.5:0.95 | mAPval
0.5 | 参数量
(M) | FLOPS
(G) | 预测时延[NCNN](#latency)
(ms) | 预测时延[Lite](#latency)
(ms) | 下载 | 配置文件 | +| Model | Input size | mAPval
0.5:0.95 | mAPval
0.5 | Params
(M) | FLOPS
(G) | Latency[CPU](#latency)
(ms) | Latency[Lite](#latency)
(ms) | Download | Config | | :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: | :----------------------------------------: | :--------------------------------------- | -| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 10.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_320_coco_lcnet.yml) | -| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 15.4ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_416_coco_lcnet.yml) | -| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 12.6ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco_lcnet.yml) | -| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20 | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) | -| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 14.5ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco_lcnet.yml) | -| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 19.5ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco_lcnet.yml) | -| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 18.3ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco_lcnet.yml) | -| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 22.1ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco_lcnet.yml) | -| PicoDet-L | 640*640 | 42.3 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) | - +| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 |
10.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_xs_320_coco_lcnet.yml) | +| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 15.4ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_xs_416_coco_lcnet.yml) | +| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 12.6ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_s_320_coco_lcnet.yml) | +| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_s_416_coco_lcnet.yml) | +| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 14.5ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_m_320_coco_lcnet.yml) | +| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 19.5ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_m_416_coco_lcnet.yml) | +| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 18.3ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_l_320_coco_lcnet.yml) | +| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 22.1ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_l_416_coco_lcnet.yml) | +| PicoDet-L | 640*640 | 42.6 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/picodet_l_640_coco_lcnet.yml) |
-注意事项: +Table Notes: -- 时延测试: 我们所有的模型都在英特尔至强6148的CPU(MKLDNN 10线程)和`骁龙865(4xA77+4xA55)`的ARM CPU上测试(4线程,FP16预测)。上面表格中标有`CPU`的是使用Paddle Inference库测试,标有`Lite`的是使用[Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite)进行测试。 -- PicoDet在COCO train2017上训练,并且在COCO val2017上进行验证。使用4卡GPU训练,并且上表所有的预训练模型都是通过发布的默认配置训练得到。 -- Benchmark测试:测试速度benchmark性能时,导出模型后处理不包含在网络中,需要设置`-o export.benchmark=True` 或手动修改[runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12)。 +- Latency: all our models are tested on an `Intel-Xeon-Gold-6148` CPU (MKLDNN, 10 threads) and on a `Qualcomm Snapdragon 865(4xA77+4xA55)` ARM CPU (4 threads, armv8, FP16). In the table above, CPU latency is measured with Paddle-Inference, and mobile latency (`Lite`) with [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite). +- PicoDet is trained on the COCO train2017 dataset and evaluated on COCO val2017. All checkpoints are trained on 4 GPUs with the default settings and hyperparameters. +- Benchmark test: when benchmarking speed, post-processing is not included in the exported model; you need to set `-o export.benchmark=True` or manually modify [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/runtime.yml#L12). 
-#### 其他模型的基线 +#### Benchmark of Other Models -| 模型 | 输入尺寸 | mAPval
0.5:0.95 | mAPval
0.5 | 参数量
(M) | FLOPS
(G) | 预测时延[NCNN](#latency)
(ms) | +| Model | Input size | mAPval
0.5:0.95 | mAPval
0.5 | Params
(M) | FLOPS
(G) | Latency[NCNN](#latency)
(ms) | | :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | | YOLOv3-Tiny | 416*416 | 16.6 | 33.1 | 8.86 | 5.62 | 25.42 | | YOLOv4-Tiny | 416*416 | 21.7 | 40.2 | 6.06 | 6.96 | 23.69 | @@ -71,39 +68,38 @@ PP-PicoDet模型有如下特点: | YOLOv5n | 640*640 | 28.4 | 46.0 | 1.9 | 4.5 | 40.35 | | YOLOv5s | 640*640 | 37.2 | 56.0 | 7.2 | 16.5 | 78.05 | -- ARM测试的benchmark脚本来自: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark)。 +- Testing Mobile latency with code: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark). -## 快速开始 +## Quick Start
-依赖包: +Requirements: -- PaddlePaddle == 2.2.2 +- PaddlePaddle >= 2.2.2
-安装 +Installation -- [安装指导文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md) -- [准备数据文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/PrepareDataSet_en.md) +- [Installation guide](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/docs/tutorials/INSTALL.md) +- [Prepare dataset](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/docs/tutorials/PrepareDataSet_en.md)
-训练&评估 +Training and Evaluation -- 单卡GPU上训练: +- Training model on single-GPU: ```shell # training on single-GPU export CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval ``` +If the GPU is out of memory during training, reduce the batch_size in TrainReader, and reduce the base_lr in LearningRate proportionally. At the same time, the configs we published are all trained with 4 GPUs. If the number of GPUs is changed to 1, the base_lr needs to be reduced by a factor of 4. -如果训练时显存out memory,将TrainReader中batch_size调小,同时LearningRate中base_lr等比例减小。 -- 多卡GPU上训练: +- Training model on multi-GPU: ```shell @@ -112,31 +108,31 @@ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval ``` -- 评估: +- Evaluation: ```shell python tools/eval.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ -o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams ``` -- 测试: +- Infer: ```shell python tools/infer.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ -o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams ``` -详情请参考[快速开始文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md). +For more details, please refer to the [Quick start guide](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/docs/tutorials/GETTING_STARTED.md). 
-## 部署 +## Deployment -### 导出及转换模型 +### Export and Convert Model
-1. 导出模型 (点击展开) +1. Export model (click to expand) ```shell cd PaddleDetection @@ -145,18 +141,22 @@ python tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ --output_dir=inference_model ``` +- If no post processing is required, please specify: `-o export.benchmark=True` (if -o has already appeared, delete -o here) or manually modify corresponding fields in [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/runtime.yml). +- If no NMS is required, please specify: `-o export.nms=False` or manually modify corresponding fields in [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/runtime.yml). + + 
-2. 转换模型至Paddle Lite (点击展开) +2. Convert to PaddleLite (click to expand) -- 安装Paddlelite>=2.10: +- Install Paddlelite>=2.10: ```shell pip install paddlelite ``` -- 转换模型至Paddle Lite格式: +- Convert model: ```shell # FP32 @@ -168,16 +168,16 @@ paddle_lite_opt --model_dir=inference_model/picodet_s_320_coco_lcnet --valid_tar
-3. 转换模型至ONNX (点击展开) +3. Convert to ONNX (click to expand) -- 安装[Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 并且 ONNX > 1.10.1, 细节请参考[导出ONNX模型教程](../../deploy/EXPORT_ONNX_MODEL.md) +- Install [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 and ONNX > 1.10.1, for details, please refer to [Tutorials of Export ONNX Model](../../deploy/EXPORT_ONNX_MODEL.md) ```shell pip install onnx -pip install paddle2onnx +pip install paddle2onnx==0.9.2 ``` -- 转换模型: +- Convert model: ```shell paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \ @@ -187,22 +187,22 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \ --save_file picodet_s_320_coco.onnx ``` -- 简化ONNX模型: 使用`onnx-simplifier`库来简化ONNX模型。 +- Simplify ONNX model: use onnx-simplifier to simplify onnx model. - - 安装 onnx-simplifier >= 0.3.6: + - Install onnx-simplifier >= 0.3.6: ```shell pip install onnx-simplifier ``` - - 简化ONNX模型: + - simplify onnx model: ```shell python -m onnxsim picodet_s_320_coco.onnx picodet_s_processed.onnx ```
-- 部署用的模型 +- Deploy models -| 模型 | 输入尺寸 | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) | +| Model | Input size | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) | | :-------- | :--------: | :---------------------: | :----------------: | :----------------: | | PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) | | PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) | @@ -216,31 +216,28 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \ | PicoDet-LCNet 1.5x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_lcnet_1_5x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x_fp16.tar) | -### 部署 +### Deploy - PaddleInference demo [Python](../../deploy/python) & [C++](../../deploy/cpp) - [PaddleLite C++ demo](../../deploy/lite) -- [NCNN C++/Python demo](../../deploy/third_engine/demo_ncnn) -- [MNN C++/Python demo](../../deploy/third_engine/demo_mnn) -- [OpenVINO C++ demo](../../deploy/third_engine/demo_openvino) -- [Android demo(Paddle Lite)](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/android/app/cxx/picodet_detection_demo) +- [Android demo(Paddle Lite)](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/release/2.4/object_detection/android/app/cxx/picodet_detection_demo) -Android demo可视化: +Android demo visualization:
-## 量化 +## Quantization
-依赖包: +Requirements: - PaddlePaddle >= 2.2.2 - PaddleSlim >= 2.2.1 -**安装:** +**Install:** ```shell pip install paddleslim==2.2.1 @@ -249,61 +246,61 @@ pip install paddleslim==2.2.1
-量化训练 (点击展开) +Quant aware (click to expand) -开始量化训练: +Configure the quant config and start training: ```shell python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ --slim_config configs/slim/quant/picodet_s_quant.yml --eval ``` -- 更多细节请参考[slim文档](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim) +- More detail can refer to [slim document](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/slim)
-离线量化 (点击展开) +Post quant (click to expand) -校准及导出量化模型: +Configure the post quant config and start calibrate model: ```shell python tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \ --slim_config configs/slim/post_quant/picodet_s_ptq.yml ``` -- 注意: 离线量化模型精度问题正在解决中. +- Notes: Now the accuracy of post quant is abnormal and this problem is being solved.
-## 非结构化剪枝 +## Unstructured Pruning
-教程: +Tutorial: -训练及部署细节请参考[非结构化剪枝文档](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/pruner/README.md)。 +Please refer to this [documentation](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet/pruner/README.md) for details such as requirements, training and deployment. 
-## 应用 +## Application -- **行人检测:** `PicoDet-S-Pedestrian`行人检测模型请参考[PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B) +- **Pedestrian detection:** model zoo of `PicoDet-S-Pedestrian` please refer to [PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B) -- **主体检测:** `PicoDet-L-Mainbody`主体检测模型请参考[主体检测文档](./application/mainbody_detection/README.md) +- **Mainbody detection:** model zoo of `PicoDet-L-Mainbody` please refer to [mainbody detection](./application/mainbody_detection/README.md) ## FAQ
-显存爆炸(Out of memory error) +Out of memory error. -请减小配置文件中`TrainReader`的`batch_size`。 +Please reduce the `batch_size` of `TrainReader` in config.
-如何迁移学习 +How to conduct transfer learning. -请重新设置配置文件中的`pretrain_weights`字段,比如利用COCO上训好的模型在自己的数据上继续训练: +Please reset `pretrain_weights` in the config to weights trained on COCO, for example: ```yaml pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams ``` @@ -311,17 +308,17 @@ pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcne 
-`transpose`算子在某些硬件上耗时验证 +The transpose operator is time-consuming on some hardware. -请使用`PicoDet-LCNet`模型,`transpose`较少。 +Please use `PicoDet-LCNet` model, which has fewer `transpose` operators.
-如何计算模型参数量。 +How to count model parameters. -可以将以下代码插入:[trainer.py](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) 来计算参数量。 +You can insert the code below at [this point](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/ppdet/engine/trainer.py#L141) to count learnable parameters. ```python params = sum([ @@ -333,8 +330,8 @@ print('params: ', params) 
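Related to parameter counting: the FLOPS column in the benchmark tables can be reproduced approximately with `paddle.flops`. A hedged sketch — `mobilenet_v1` is only a runnable stand-in; substitute the ppdet model built inside the Trainer and match `input_size` to your config:

```python
# Estimate FLOPs of a model with paddle.flops.
import paddle
from paddle.vision.models import mobilenet_v1

model = mobilenet_v1()  # placeholder network; swap in the detector here
flops = paddle.flops(model, input_size=[1, 3, 320, 320], print_detail=False)
print('FLOPs:', flops)
```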
-## 引用PP-PicoDet -如果需要在你的研究中使用PP-PicoDet,请通过一下方式引用我们的技术报告: +## Cite PP-PicoDet +If you use PicoDet in your research, please cite our work by using the following BibTeX entry: ``` @misc{yu2021pppicodet, title={PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices}, diff --git a/deploy/pphuman/datacollector.py b/deploy/pphuman/datacollector.py index cd459aad0680418bcd087d00662b0c310151ffc3..f1e3a21360fb871e26e53129cd8833cd123f1422 100644 --- a/deploy/pphuman/datacollector.py +++ b/deploy/pphuman/datacollector.py @@ -35,6 +35,11 @@ class Result(object): return self.res_dict[name] return None + def clear(self, name): + # guard against keys that were never collected, to avoid a KeyError + if name in self.res_dict: + self.res_dict[name].clear() + class DataCollector(object): """ @@ -80,7 +83,6 @@ class DataCollector(object): ids = int(mot_item[0]) if ids not in self.collector: self.collector[ids] = copy.deepcopy(self.mots) - self.collector[ids]["frames"].append(frameid) self.collector[ids]["rects"].append([mot_item[2:]]) if attr_res: diff --git a/deploy/pphuman/mtmct.py b/deploy/pphuman/mtmct.py index 30f84724809753b577503b3bb59d50a21731ddb1..5e0abbd9d0c7be69120cac04b3c5794d9bb9c436 100644 --- a/deploy/pphuman/mtmct.py +++ b/deploy/pphuman/mtmct.py @@ -297,10 +297,9 @@ def distill_idfeat(mot_res): feature_new = feature_list -    #if available frames number is more than 200, take one frame data per 20 frames -    if len(qualities_new) > 200: -        skipf = 20 -    else: -        skipf = max(10, len(qualities_new) // 10) +    # if more than 20 frames are available, keep one frame out of every 2 +    skipf = 1 +    if len(qualities_new) > 20: +        skipf = 2 quality_skip = np.array(qualities_new[::skipf]) feature_skip = np.array(feature_new[::skipf]) diff --git a/deploy/pphuman/pipeline.py b/deploy/pphuman/pipeline.py index 4d6fa014ae783b61c4464b2e292c5d745a5297d1..9e23e0c0f8e34e963a1cf2597318bff527f991c3 100644 --- a/deploy/pphuman/pipeline.py +++ b/deploy/pphuman/pipeline.py @@ -587,7 +587,7 @@ class PipePredictor(object): if self.cfg['visual']: self.action_visual_helper.update(action_res) - if self.with_mtmct: + if self.with_mtmct and frame_id % 10 == 0: crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot( frame, mot_res) if frame_id > self.warmup_frame: @@ -603,6 +603,8 @@ "rects": rects } self.pipeline_res.update(reid_res_dict, 'reid') + else: + self.pipeline_res.clear('reid') self.collector.append(frame_id, self.pipeline_res) diff --git a/deploy/python/infer.py b/deploy/python/infer.py index 84c643935f3d3b20acd910b0fa7412b46e7d1b72..3296e16e5a9612ba71d862d6843d9b9f576be1ff 100644 --- a/deploy/python/infer.py +++ b/deploy/python/infer.py @@ -231,7 +231,7 @@ class Detector(object): self.det_times.preprocess_time_s.end() # model prediction - result = self.predict(repeats=repeats) # warmup + result = self.predict(repeats=50) # warmup with a fixed 50 repeats, decoupled from `repeats` self.det_times.inference_time_s.start() result = self.predict(repeats=repeats) self.det_times.inference_time_s.end(repeats=repeats) @@ -296,7 +296,7 @@ if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) out_path = os.path.join(self.output_dir, video_out_name) - fourcc = cv2.VideoWriter_fourcc(* 'mp4v') + fourcc = cv2.VideoWriter_fourcc(*'mp4v') writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height)) index = 1 while (1): @@ -790,7 +790,7 @@ def main(): if FLAGS.image_dir is None and FLAGS.image_file is not None: assert FLAGS.batch_size == 1, "batch_size should be 1, when image_file is not None" img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file) - detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10) + detector.predict_image(img_list,
FLAGS.run_benchmark, repeats=100) if not FLAGS.run_benchmark: detector.det_times.info(average=True) else: diff --git a/ppdet/data/source/category.py b/ppdet/data/source/category.py index 9390e54c4ce5dacce4674363689b629261c787c6..2c366968a439088bbec66ad2fc5ac182daea199c 100644 --- a/ppdet/data/source/category.py +++ b/ppdet/data/source/category.py @@ -39,6 +39,11 @@ def get_categories(metric_type, anno_file=None, arch=None): if arch == 'keypoint_arch': return (None, {'id': 'keypoint'}) + if anno_file is None or (not os.path.isfile(anno_file)): + logger.warning("anno_file '{}' is None, not set or does not exist, " + "please recheck TrainDataset/EvalDataset/TestDataset.anno_path, " + "otherwise the default categories will be used according to metric_type.".format(anno_file)) + if metric_type.lower() == 'coco' or metric_type.lower( ) == 'rbox' or metric_type.lower() == 'snipercoco': if anno_file and os.path.isfile(anno_file): @@ -55,8 +60,9 @@ def get_categories(metric_type, anno_file=None, arch=None): # anno file not exist, load default categories of COCO17 else: if metric_type.lower() == 'rbox': + logger.warning("metric_type: {}, load default categories of DOTA.".format(metric_type)) return _dota_category() - + logger.warning("metric_type: {}, load default categories of COCO.".format(metric_type)) return _coco17_category() elif metric_type.lower() == 'voc': @@ -77,6 +83,7 @@ def get_categories(metric_type, anno_file=None, arch=None): # anno file not exist, load default categories of # VOC all 20 categories else: + logger.warning("metric_type: {}, load default categories of VOC.".format(metric_type)) return _vocall_category() elif metric_type.lower() == 'oid': @@ -104,6 +111,7 @@ def get_categories(metric_type, anno_file=None, arch=None): return clsid2catid, catid2name # anno file not exist, load default category 'pedestrian'.
else: + logger.warning("metric_type: {}, load default categories of pedestrian MOT.".format(metric_type)) return _mot_category(category='pedestrian') elif metric_type.lower() in ['kitti', 'bdd100kmot']: @@ -122,6 +130,7 @@ def get_categories(metric_type, anno_file=None, arch=None): return clsid2catid, catid2name # anno file not exist, load default categories of visdrone all 10 categories else: + logger.warning("metric_type: {}, load default categories of VisDrone.".format(metric_type)) return _visdrone_category() else: diff --git a/ppdet/metrics/mcmot_metrics.py b/ppdet/metrics/mcmot_metrics.py index 5bcfb923470a1f94a2bd951fb721221a8f339354..48d15c90e6512eb8943ca3ee224ac92b2795453c 100644 --- a/ppdet/metrics/mcmot_metrics.py +++ b/ppdet/metrics/mcmot_metrics.py @@ -26,8 +26,6 @@ from motmetrics.math_util import quiet_divide import numpy as np import pandas as pd -import paddle -import paddle.nn.functional as F from .metrics import Metric import motmetrics as mm import openpyxl @@ -311,7 +309,9 @@ class MCMOTEvaluator(object): self.gt_filename = os.path.join(self.data_root, '../', 'sequences', '{}.txt'.format(self.seq_name)) - + if not os.path.exists(self.gt_filename): + logger.warning("gt_filename '{}' of MCMOTEvaluator does not exist, so the MOTA will be -inf.".format(self.gt_filename)) + def reset_accumulator(self): import motmetrics as mm mm.lap.default_solver = 'lap' diff --git a/ppdet/metrics/mot_metrics.py b/ppdet/metrics/mot_metrics.py index 85cba3630cd428478175ddc6347db4152d47a353..af2f7dd19c801cfe2c34d86c6c77ed4816b6fbec 100644 --- a/ppdet/metrics/mot_metrics.py +++ b/ppdet/metrics/mot_metrics.py @@ -22,8 +22,7 @@ import sys import math from collections import defaultdict import numpy as np -import paddle -import paddle.nn.functional as F + from ppdet.modeling.bbox_utils import bbox_iou_np_expand from .map_utils import ap_per_class from .metrics import Metric @@ -36,8 +35,10 @@ __all__ = ['MOTEvaluator', 'MOTMetric', 'JDEDetMetric', 'KITTIMOTMetric'] def read_mot_results(filename, is_gt=False, is_ignore=False): - valid_labels = {1} - ignore_labels = {2, 7, 8, 12} # only in motchallenge datasets like 'MOT16' + valid_label = [1] + ignore_labels = [2, 7, 8, 12] # only in motchallenge datasets like 'MOT16' + logger.info("In MOT16/17 datasets the valid_label of ground truth is '{}', " + "in other datasets it should be '0' for single-class MOT.".format(valid_label[0])) results_dict = dict() if os.path.isfile(filename): with open(filename, 'r') as f: @@ -50,12 +51,10 @@ def read_mot_results(filename, is_gt=False, is_ignore=False): continue results_dict.setdefault(fid, list()) - box_size = float(linelist[4]) * float(linelist[5]) - if is_gt: label = int(float(linelist[7])) mark = int(float(linelist[6])) - if mark == 0 or label not in valid_labels: + if mark == 0 or label not in valid_label: continue score = 1 elif is_ignore: @@ -118,6 +117,8 @@ class MOTEvaluator(object): assert self.data_type == 'mot' gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt') + if not os.path.exists(gt_filename): + logger.warning("gt_filename '{}' of MOTEvaluator does not exist, so the MOTA will be -inf.".format(gt_filename)) self.gt_frame_dict = read_mot_results(gt_filename, is_gt=True) self.gt_ignore_frame_dict = read_mot_results( gt_filename, is_ignore=True) diff --git a/ppdet/modeling/architectures/meta_arch.py b/ppdet/modeling/architectures/meta_arch.py index 1f13c854072956395e8bb9bbb5b9ad9d43d2eeec..4ff84a97a61739e06f215f56a64daf0459e4a971 100644 --- a/ppdet/modeling/architectures/meta_arch.py +++
b/ppdet/modeling/architectures/meta_arch.py @@ -22,22 +22,23 @@ class BaseArch(nn.Layer): self.fuse_norm = False def load_meanstd(self, cfg_transform): - self.scale = 1. - self.mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape( - (1, 3, 1, 1)) - self.std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1)) + scale = 1. + mean = np.array([0.485, 0.456, 0.406], dtype=np.float32) + std = np.array([0.229, 0.224, 0.225], dtype=np.float32) for item in cfg_transform: if 'NormalizeImage' in item: - self.mean = paddle.to_tensor(item['NormalizeImage'][ - 'mean']).reshape((1, 3, 1, 1)) - self.std = paddle.to_tensor(item['NormalizeImage'][ - 'std']).reshape((1, 3, 1, 1)) + mean = np.array( + item['NormalizeImage']['mean'], dtype=np.float32) + std = np.array(item['NormalizeImage']['std'], dtype=np.float32) if item['NormalizeImage'].get('is_scale', True): - self.scale = 1. / 255. + scale = 1. / 255. break if self.data_format == 'NHWC': - self.mean = self.mean.reshape(1, 1, 1, 3) - self.std = self.std.reshape(1, 1, 1, 3) + self.scale = paddle.to_tensor(scale / std).reshape((1, 1, 1, 3)) + self.bias = paddle.to_tensor(-mean / std).reshape((1, 1, 1, 3)) + else: + self.scale = paddle.to_tensor(scale / std).reshape((1, 3, 1, 1)) + self.bias = paddle.to_tensor(-mean / std).reshape((1, 3, 1, 1)) def forward(self, inputs): if self.data_format == 'NHWC': @@ -46,7 +47,7 @@ class BaseArch(nn.Layer): if self.fuse_norm: image = inputs['image'] - self.inputs['image'] = (image * self.scale - self.mean) / self.std + self.inputs['image'] = image * self.scale + self.bias self.inputs['im_shape'] = inputs['im_shape'] self.inputs['scale_factor'] = inputs['scale_factor'] else: @@ -66,8 +67,7 @@ class BaseArch(nn.Layer): outs = [] for inp in inputs_list: if self.fuse_norm: - self.inputs['image'] = ( - inp['image'] * self.scale - self.mean) / self.std + self.inputs['image'] = inp['image'] * self.scale + self.bias self.inputs['im_shape'] = inp['im_shape'] self.inputs['scale_factor'] = inp['scale_factor'] else: @@ -75,7 +75,7 @@ class BaseArch(nn.Layer): outs.append(self.get_pred()) # multi-scale test - if len(outs)>1: + if len(outs) > 1: out = self.merge_multi_scale_predictions(outs) else: out = outs[0] @@ -92,7 +92,9 @@ class BaseArch(nn.Layer): keep_top_k = self.bbox_post_process.nms.keep_top_k nms_threshold = self.bbox_post_process.nms.nms_threshold else: - raise Exception("Multi scale test only supports CascadeRCNN, FasterRCNN and MaskRCNN for now") + raise Exception( + "Multi scale test only supports CascadeRCNN, FasterRCNN and MaskRCNN for now" + ) final_boxes = [] all_scale_outs = paddle.concat([o['bbox'] for o in outs]).numpy() @@ -101,9 +103,11 @@ class BaseArch(nn.Layer): if np.count_nonzero(idxs) == 0: continue r = nms(all_scale_outs[idxs, 1:], nms_threshold) - final_boxes.append(np.concatenate([np.full((r.shape[0], 1), c), r], 1)) + final_boxes.append( + np.concatenate([np.full((r.shape[0], 1), c), r], 1)) out = np.concatenate(final_boxes) - out = np.concatenate(sorted(out, key=lambda e: e[1])[-keep_top_k:]).reshape((-1, 6)) + out = np.concatenate(sorted( + out, key=lambda e: e[1])[-keep_top_k:]).reshape((-1, 6)) out = { 'bbox': paddle.to_tensor(out), 'bbox_num': paddle.to_tensor(np.array([out.shape[0], ]))
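One note on the meta_arch.py refactor above: it relies on the identity (x * scale - mean) / std = x * (scale / std) + (-mean / std), so both constants can be precomputed once in load_meanstd and the per-image normalization becomes a single multiply-add. A quick numpy sketch verifying that the fused form matches the original:

```python
# Check that the fused normalization in load_meanstd() matches the old form.
import numpy as np

x = np.random.rand(2, 3, 4, 4).astype(np.float32)   # dummy NCHW image batch
scale = 1.0 / 255.0                                  # the is_scale=True case
mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 3, 1, 1)

old = (x * scale - mean) / std              # original: scale, then normalize
new = x * (scale / std) + (-mean / std)     # fused: one multiply-add
print(np.allclose(old, new, atol=1e-5))     # True
```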