diff --git a/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml b/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml
index 42fe6eb2a29c96452404598804ef21a1101f3938..cbca94755fa97d1cdc8de9a55a39e7063de0417c 100644
--- a/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml
+++ b/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml
@@ -35,7 +35,7 @@ PCBPyramid:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # 0 means no need to filter out too small boxes
-  vertical_ratio: -1 # -1 means no need to filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # -1 means no need to filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/deepsort/reid/deepsort_pplcnet.yml b/configs/mot/deepsort/reid/deepsort_pplcnet.yml
index b8340d7016edd3aa8bd63012a3c5ee83a2b268d9..d50da28b2cadf80d42184d37b4428f564c2033ac 100644
--- a/configs/mot/deepsort/reid/deepsort_pplcnet.yml
+++ b/configs/mot/deepsort/reid/deepsort_pplcnet.yml
@@ -34,7 +34,7 @@ PPLCNetEmbedding:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # filter out too small boxes
-  vertical_ratio: -1 # filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml b/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml
index e67614b58745bcf7a947185547831129d8a6ac16..6e07042d837eb3f6be29f6eef7cfb35275433fa3 100644
--- a/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml
+++ b/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml
@@ -34,7 +34,7 @@ PPLCNetEmbedding:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # 0 means no need to filter out too small boxes
-  vertical_ratio: -1 # -1 means no need to filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # -1 means no need to filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/deepsort/reid/deepsort_resnet.yml b/configs/mot/deepsort/reid/deepsort_resnet.yml
index 7ca06b3e1928968cbf6fb8fc55fd40f164954562..a9460586b6485b055d59efb7fe204f044edb2e21 100644
--- a/configs/mot/deepsort/reid/deepsort_resnet.yml
+++ b/configs/mot/deepsort/reid/deepsort_resnet.yml
@@ -33,7 +33,7 @@ ResNetEmbedding:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # filter out too small boxes
-  vertical_ratio: -1 # filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/fairmot/README.md b/configs/mot/fairmot/README.md
index 8103a4f369681ea41ff4ac1062d841c5558d9fe4..adb20bb28120e2b03c55020e5f0ba25d4a7bfa57 100644
--- a/configs/mot/fairmot/README.md
+++ b/configs/mot/fairmot/README.md
@@ -44,7 +44,7 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | DLA-34 | 576x320 | 69.9 | 70.2 | 1044 | 8869 | 44898 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_576x320.pdparams) | [config](./fairmot_dla34_30e_576x320.yml) |
 
 **Notes:**
- - FairMOT DLA-34 used 2 GPUs for training and mini-batch size as 6 on each GPU, and trained for 30 epoches.
+ - FairMOT DLA-34 used 2 GPUs for training and mini-batch size as 6 on each GPU, and trained for 30 epochs.
 
 ### FairMOT enhance model
@@ -62,8 +62,8 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 **Notes:**
 - FairMOT enhance used 8 GPUs for training, and the crowdhuman dataset is added to the train-set during training.
- - For FairMOT enhance DLA-34 the batch size is 16 on each GPU，and trained for 60 epoches.
- - For FairMOT enhance HarDNet-85 the batch size is 10 on each GPU，and trained for 30 epoches.
+ - For FairMOT enhance DLA-34 the batch size is 16 on each GPU, and trained for 60 epochs.
+ - For FairMOT enhance HarDNet-85 the batch size is 10 on each GPU, and trained for 30 epochs.
 
 ### FairMOT light model
 ### Results on MOT-16 Test Set
@@ -79,7 +79,7 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | HRNetV2-W18 | 576x320 | 65.3 | 64.8 | 4137 | 28860 | 163017 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.pdparams) | [config](./fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml) |
 
 **Notes:**
- - FairMOT HRNetV2-W18 used 8 GPUs for training and mini-batch size as 4 on each GPU, and trained for 30 epoches. Only ImageNet pre-train model is used, and the optimizer adopts Momentum. The crowdhuman dataset is added to the train-set during training.
+ - FairMOT HRNetV2-W18 used 8 GPUs for training and mini-batch size as 4 on each GPU, and trained for 30 epochs. Only the ImageNet pre-trained model is used, and the optimizer adopts Momentum. The crowdhuman dataset is added to the train-set during training.
 
 ### FairMOT + BYTETracker
diff --git a/configs/mot/mcfairmot/README.md b/configs/mot/mcfairmot/README.md
index f5f0f7ee80f032e9f94f29dc559a305304faa81e..4009ddd87af15858324aa9cf32b2ea14060fbd00 100644
--- a/configs/mot/mcfairmot/README.md
+++ b/configs/mot/mcfairmot/README.md
@@ -31,8 +31,8 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | HRNetV2-W18 | 576x320 | 12.0 | 33.8 | 2178 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.yml) |
 
 **Notes:**
- - MOTA is the average MOTA of 10 catecories in the VisDrone2019 MOT dataset, and its value is also equal to the average MOTA of all the evaluated video sequences. Here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot.zip) of the dataset.
- - MCFairMOT used 4 GPUs for training 30 epoches. The batch size is 6 on each GPU for MCFairMOT DLA-34, and 8 for MCFairMOT HRNetV2-W18.
+ - MOTA is the average MOTA of 10 categories in the VisDrone2019 MOT dataset, and its value is also equal to the average MOTA of all the evaluated video sequences. Here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot.zip) of the dataset.
+ - MCFairMOT used 4 GPUs and trained for 30 epochs. The batch size is 6 on each GPU for MCFairMOT DLA-34, and 8 for MCFairMOT HRNetV2-W18.
 
 ### MCFairMOT Results on VisDrone Vehicle Val Set
 | backbone | input shape | MOTA | IDF1 | IDS | FPS | download | config |
@@ -41,7 +41,7 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | HRNetV2-W18 | 1088x608 | 35.6 | 56.3 | 190 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml) |
 
 **Notes:**
- - MOTA is the average MOTA of 4 catecories in the VisDrone Vehicle dataset, and this dataset is extracted from the VisDrone2019 MOT dataset, here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot_vehicle.zip).
+ - MOTA is the average MOTA of 4 categories in the VisDrone Vehicle dataset; this dataset is extracted from the VisDrone2019 MOT dataset, and here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot_vehicle.zip).
 - The tracker used in MCFairMOT model here is ByteTracker.
 
 ### MCFairMOT off-line quantization results on VisDrone Vehicle val-set
diff --git a/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py b/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py
index 333b6581a18b964942233f1c6765bd57655aec4b..0be2f1eb8fcb080738ccb45d01d6c20671381706 100644
--- a/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py
+++ b/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py
@@ -58,7 +58,7 @@ def genGtFile(seqPath, outPath, classes=[]):
                 class_index = str(classes.index(line[7]) + 1)
                 newLine.append(class_index)
             else:
-                newLine.append('1') # use permenant class '1'
+                newLine.append('1') # use permanent class '1'
             newLine.append('1')
             motLine.append(newLine)
     mkdir_if_missing(outPath)
diff --git a/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh b/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh
index 07b488df2f1a64e3b75fd8db518faaa983068b9b..b88b25180d9615b5277b1101f321c0d2704c3241 100644
--- a/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh
+++ b/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh
@@ -7,7 +7,7 @@ phasetrain=train
 phaseval=val
 classes=2,3,4,9,10
 
-# gen mot dataste
+# gen mot dataset
 python bdd100k2mot.py --data_path=${data_path} --phase=${phasetrain} --classes=${classes} --img_dir=${img_dir} --label_dir=${label_dir} --save_path=${save_path}
 python bdd100k2mot.py --data_path=${data_path} --phase=${phaseval} --classes=${classes} --img_dir=${img_dir} --label_dir=${label_dir} --save_path=${save_path}
diff --git a/configs/mot/vehicle/tools/visdrone/visdrone2mot.py b/configs/mot/vehicle/tools/visdrone/visdrone2mot.py
index a165c66251cdc00a7c00918be46242fd081ac82a..a2fa200204f5656ce015d371715b0f7c2bf9366d 100644
--- a/configs/mot/vehicle/tools/visdrone/visdrone2mot.py
+++ b/configs/mot/vehicle/tools/visdrone/visdrone2mot.py
@@ -55,7 +55,7 @@ def genGtFile(seqPath, outPath, classes=[]):
                 class_index = str(classes.index(line[7]) + 1)
                 newLine.append(class_index)
             else:
-                newLine.append('1') # use permenant class '1'
+                newLine.append('1') # use permanent class '1'
             newLine.append('1')
             motLine.append(newLine)
     mkdir_if_missing(outPath)
diff --git a/configs/ppyolo/README.md b/configs/ppyolo/README.md
index 754fdd434a6722b640e441e6b565b8593bf86004..0dccfe418df412d4706edc1100204fd9931017e8 100644
--- a/configs/ppyolo/README.md
+++ b/configs/ppyolo/README.md
@@ -60,7 +60,7 @@ PP-YOLO and PP-YOLOv2 improved performance and speed of YOLOv3 with following me
 - PP-YOLO is trained on COCO train2017 dataset and evaluated on val2017 & test-dev2017 dataset, Box APtest is evaluation results of `mAP(IoU=0.5:0.95)`.
 - PP-YOLO used 8 GPUs for training and mini-batch size as 24 on each GPU, if GPU number and mini-batch size is changed, learning rate and iteration times should be adjusted according to [FAQ](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/FAQ).
-- PP-YOLO inference speed is tesed on single Tesla V100 with batch size as 1, CUDA 10.2, CUDNN 7.5.1, TensorRT 5.1.2.2 in TensorRT mode.
+- PP-YOLO inference speed is tested on single Tesla V100 with batch size as 1, CUDA 10.2, CUDNN 7.5.1, TensorRT 5.1.2.2 in TensorRT mode.
 - PP-YOLO FP32 inference speed testing uses inference model exported by `tools/export_model.py` and benchmarked by running `deploy/python/infer.py` with `--run_benchmark`. All testing results do not contain the time cost of data reading and post-processing(NMS), which is the same as [YOLOv4(AlexyAB)](https://github.com/AlexeyAB/darknet) in testing method.
 - TensorRT FP16 inference speed testing excludes the time cost of bounding-box decoding(`yolo_box`) part comparing with FP32 testing above, which means that data reading, bounding-box decoding and post-processing(NMS) are excluded (test method same as [YOLOv4(AlexyAB)](https://github.com/AlexeyAB/darknet) too).
 - If you set `--run_benchmark=True`, you should install these dependencies first: `pip install pynvml psutil GPUtil`.
@@ -176,7 +176,7 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_
 CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_dir=demo
 ```
 
-### 4. Inferece deployment
+### 4. Inference deployment
 
 For inference deployment or benchmark, the model exported with `tools/export_model.py` should be used, and inference should be performed with the Paddle inference library with the following commands:
diff --git a/configs/ppyoloe/README.md b/configs/ppyoloe/README.md
index 9b550b77e9e140acef259e597a072478454314d4..53cb4f74d30e0a0f319880b5622407145ca4e664 100644
--- a/configs/ppyoloe/README.md
+++ b/configs/ppyoloe/README.md
@@ -152,7 +152,7 @@ python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c con
 **Notes:**
 - If you need to evaluate while training, please add `--eval`.
 - PP-YOLOE+ supports mixed precision training, please add `--amp`.
-- PaddleDetection supports multi-machine distribued training, you can refer to [DistributedTraining tutorial](../../docs/tutorials/DistributedTraining_en.md).
+- PaddleDetection supports multi-machine distributed training; you can refer to the [DistributedTraining tutorial](../../docs/tutorials/DistributedTraining_en.md).
 
 ### Evaluation
@@ -259,7 +259,7 @@ trtexec --onnx=./ppyoloe_plus_crn_s_80e_coco.onnx --saveEngine=./ppyoloe_s_bs32.
 ### Deployment
 
-PP-YOLOE can be deployed by following approches:
+PP-YOLOE can be deployed by the following approaches:
 - Paddle Inference [Python](../../deploy/python) & [C++](../../deploy/cpp)
 - [Paddle-TensorRT](../../deploy/TENSOR_RT.md)
 - [PaddleServing](https://github.com/PaddlePaddle/Serving)
@@ -299,7 +299,7 @@ Model | AP | AP50
 **PP-YOLOE** | **30.5** | **46.4**
 
 **Notes**
-- Here, we use [VisDrone](https://github.com/VisDrone/VisDrone-Dataset) dataset, and to detect 9 objects including `person, bicycles, car, van, truck, tricyle, awning-tricyle, bus, motor`.
+- Here, we use the [VisDrone](https://github.com/VisDrone/VisDrone-Dataset) dataset to detect 9 objects including `person, bicycles, car, van, truck, tricycle, awning-tricycle, bus, motor`.
 - Above models are trained using the official default config, and load pretrained parameters on COCO dataset.
 - *Due to the limited time, more verification results will be supplemented in the future. You are also welcome to contribute to PP-YOLOE*
diff --git a/configs/retinanet/README.md b/configs/retinanet/README.md
index cd5f21ebf339c3c5a840406e9912901d406e9536..1259d47dddf5eb52e1499c7c63ae913d2f806c7f 100644
--- a/configs/retinanet/README.md
+++ b/configs/retinanet/README.md
@@ -13,7 +13,7 @@
 **Notes:**
 - The ResNet50-FPN are trained on COCO train2017 with 8 GPUs. Both ResNet101-FPN and ResNet50-FPN with [FGD](../slim/distill/README.md) are trained on COCO train2017 with 4 GPUs.
-- All above models are evaludated on val2017. Box AP=`mAP(IoU=0.5:0.95)`.
+- All above models are evaluated on val2017. Box AP=`mAP(IoU=0.5:0.95)`.
 
 ## Citation
diff --git a/configs/rotate/tools/generate_result.py b/configs/rotate/tools/generate_result.py
index a103b9d63bf43dc134189dcb56ed358a15ef39ee..f8343ee5b368c796ef31b92977653843515bcf2a 100644
--- a/configs/rotate/tools/generate_result.py
+++ b/configs/rotate/tools/generate_result.py
@@ -248,7 +248,7 @@ def parse_args():
         '--nms_thresh',
         type=float,
         default=0.1,
-        help='nms threshold whild merging results')
+        help='nms threshold while merging results')
     return parser.parse_args()
diff --git a/configs/rotate/tools/prepare_data.py b/configs/rotate/tools/prepare_data.py
index 7652edae27dc4bacdc30caa56b314b4b2c92188d..21488e2c7a5a604dad4a508f2c67ec6bf8cea37a 100644
--- a/configs/rotate/tools/prepare_data.py
+++ b/configs/rotate/tools/prepare_data.py
@@ -74,7 +74,7 @@ def parse_args():
         nargs='+',
         type=float,
         default=[1.],
-        help='scales for multi-sclace training')
+        help='scales for multi-scale training')
     parser.add_argument(
         '--nproc', type=int, default=8, help='the processor number')
diff --git a/configs/semi_det/README.md b/configs/semi_det/README.md
index 5e583cbbdcbde55b1830020a39713337ba309a1e..996a1decfec0328420654d2d39d930ea2c7fdc0f 100644
--- a/configs/semi_det/README.md
+++ b/configs/semi_det/README.md
@@ -211,7 +211,7 @@ UnsupTrainDataset:
 ### Pre-training configuration
 
 ```python
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment out the other
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000
diff --git a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml
index d1199d2ca61a6754cf79eb211899653a0f169e7c..56087d5282ffa1ca2c656581b56285aaf41077cc 100644
--- a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml
+++ b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml
@@ -8,7 +8,7 @@ epochs: &epochs 24
 weights: output/denseteacher_fcos_r50_fpn_coco_full/model_final
 
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment out the other
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000
diff --git a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml
index 4f1b0ccbe08897d341128dac53653684e5994e03..85afb83bb94f7827e914ecf902fa41612d66770d 100644
--- a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml
+++ b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml
@@ -8,7 +8,7 @@ epochs: &epochs 240 # 480 will be better
 weights: output/denseteacher_fcos_r50_fpn_coco_semi005/model_final
 
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment out the other
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000
diff --git a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml
index 9ed9246ec3b8ecb5369f54843a10e97486d2db4a..7b99859d9ef831a8730a0c6e699f9dd60ef46890 100644
--- a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml
+++ b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml
@@ -8,7 +8,7 @@ epochs: &epochs 240
 weights: output/denseteacher_fcos_r50_fpn_coco_semi010/model_final
 
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment out the other
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000