From 8c23516918d60356bb5842f699e14f9c12553601 Mon Sep 17 00:00:00 2001
From: chenxujun
Date: Wed, 1 Feb 2023 13:55:46 +0800
Subject: [PATCH] Fix some words (#7659)

---
 configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml  | 2 +-
 configs/mot/deepsort/reid/deepsort_pplcnet.yml           | 2 +-
 configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml   | 2 +-
 configs/mot/deepsort/reid/deepsort_resnet.yml            | 2 +-
 configs/mot/fairmot/README.md                            | 8 ++++----
 configs/mot/mcfairmot/README.md                          | 6 +++---
 configs/mot/pedestrian/tools/visdrone/visdrone2mot.py    | 2 +-
 .../vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh   | 2 +-
 configs/mot/vehicle/tools/visdrone/visdrone2mot.py       | 2 +-
 configs/ppyolo/README.md                                 | 4 ++--
 configs/ppyoloe/README.md                                | 6 +++---
 configs/retinanet/README.md                              | 2 +-
 configs/rotate/tools/generate_result.py                  | 2 +-
 configs/rotate/tools/prepare_data.py                     | 2 +-
 configs/semi_det/README.md                               | 2 +-
 .../denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml | 2 +-
 .../denseteacher_fcos_r50_fpn_coco_semi005.yml           | 2 +-
 .../denseteacher_fcos_r50_fpn_coco_semi010.yml           | 2 +-
 18 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml b/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml
index 42fe6eb2a..cbca94755 100644
--- a/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml
+++ b/configs/mot/deepsort/reid/deepsort_pcb_pyramid_r101.yml
@@ -35,7 +35,7 @@ PCBPyramid:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # 0 means no need to filter out too small boxes
-  vertical_ratio: -1 # -1 means no need to filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # -1 means no need to filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/deepsort/reid/deepsort_pplcnet.yml b/configs/mot/deepsort/reid/deepsort_pplcnet.yml
index b8340d701..d50da28b2 100644
--- a/configs/mot/deepsort/reid/deepsort_pplcnet.yml
+++ b/configs/mot/deepsort/reid/deepsort_pplcnet.yml
@@ -34,7 +34,7 @@ PPLCNetEmbedding:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # filter out too small boxes
-  vertical_ratio: -1 # filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml b/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml
index e67614b58..6e07042d8 100644
--- a/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml
+++ b/configs/mot/deepsort/reid/deepsort_pplcnet_vehicle.yml
@@ -34,7 +34,7 @@ PPLCNetEmbedding:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # 0 means no need to filter out too small boxes
-  vertical_ratio: -1 # -1 means no need to filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # -1 means no need to filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/deepsort/reid/deepsort_resnet.yml b/configs/mot/deepsort/reid/deepsort_resnet.yml
index 7ca06b3e1..a9460586b 100644
--- a/configs/mot/deepsort/reid/deepsort_resnet.yml
+++ b/configs/mot/deepsort/reid/deepsort_resnet.yml
@@ -33,7 +33,7 @@ ResNetEmbedding:
 DeepSORTTracker:
   input_size: [64, 192]
   min_box_area: 0 # filter out too small boxes
-  vertical_ratio: -1 # filter out bboxes, usuallly set 1.6 for pedestrian
+  vertical_ratio: -1 # filter out bboxes, usually set 1.6 for pedestrian
   budget: 100
   max_age: 70
   n_init: 3
diff --git a/configs/mot/fairmot/README.md b/configs/mot/fairmot/README.md
index 8103a4f36..adb20bb28 100644
--- a/configs/mot/fairmot/README.md
+++ b/configs/mot/fairmot/README.md
@@ -44,7 +44,7 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | DLA-34 | 576x320 | 69.9 | 70.2 | 1044 | 8869 | 44898 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_576x320.pdparams) | [config](./fairmot_dla34_30e_576x320.yml) |
 
 **Notes:**
- - FairMOT DLA-34 used 2 GPUs for training and mini-batch size as 6 on each GPU, and trained for 30 epoches.
+ - FairMOT DLA-34 used 2 GPUs for training and mini-batch size as 6 on each GPU, and trained for 30 epochs.
 
 ### FairMOT enhance model
 
@@ -62,8 +62,8 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 
 **Notes:**
  - FairMOT enhance used 8 GPUs for training, and the crowdhuman dataset is added to the train-set during training.
- - For FairMOT enhance DLA-34 the batch size is 16 on each GPU,and trained for 60 epoches.
+ - For FairMOT enhance DLA-34 the batch size is 16 on each GPU,and trained for 60 epochs.
- - For FairMOT enhance HarDNet-85 the batch size is 10 on each GPU,and trained for 30 epoches.
+ - For FairMOT enhance HarDNet-85 the batch size is 10 on each GPU,and trained for 30 epochs.
 
 ### FairMOT light model
 ### Results on MOT-16 Test Set
@@ -79,7 +79,7 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | HRNetV2-W18 | 576x320 | 65.3 | 64.8 | 4137 | 28860 | 163017 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.pdparams) | [config](./fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml) |
 
 **Notes:**
- - FairMOT HRNetV2-W18 used 8 GPUs for training and mini-batch size as 4 on each GPU, and trained for 30 epoches. Only ImageNet pre-train model is used, and the optimizer adopts Momentum. The crowdhuman dataset is added to the train-set during training.
+ - FairMOT HRNetV2-W18 used 8 GPUs for training and mini-batch size as 4 on each GPU, and trained for 30 epochs. Only ImageNet pre-train model is used, and the optimizer adopts Momentum. The crowdhuman dataset is added to the train-set during training.
 
 ### FairMOT + BYTETracker
 
diff --git a/configs/mot/mcfairmot/README.md b/configs/mot/mcfairmot/README.md
index f5f0f7ee8..4009ddd87 100644
--- a/configs/mot/mcfairmot/README.md
+++ b/configs/mot/mcfairmot/README.md
@@ -31,8 +31,8 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | HRNetV2-W18 | 576x320 | 12.0 | 33.8 | 2178 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.yml) |
 
 **Notes:**
- - MOTA is the average MOTA of 10 catecories in the VisDrone2019 MOT dataset, and its value is also equal to the average MOTA of all the evaluated video sequences. Here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot.zip) of the dataset.
+ - MOTA is the average MOTA of 10 categories in the VisDrone2019 MOT dataset, and its value is also equal to the average MOTA of all the evaluated video sequences. Here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot.zip) of the dataset.
- - MCFairMOT used 4 GPUs for training 30 epoches. The batch size is 6 on each GPU for MCFairMOT DLA-34, and 8 for MCFairMOT HRNetV2-W18.
+ - MCFairMOT used 4 GPUs for training 30 epochs. The batch size is 6 on each GPU for MCFairMOT DLA-34, and 8 for MCFairMOT HRNetV2-W18.
 
 ### MCFairMOT Results on VisDrone Vehicle Val Set
 | backbone | input shape | MOTA | IDF1 | IDS | FPS | download | config |
@@ -41,7 +41,7 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this
 | HRNetV2-W18 | 1088x608 | 35.6 | 56.3 | 190 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml) |
 
 **Notes:**
- - MOTA is the average MOTA of 4 catecories in the VisDrone Vehicle dataset, and this dataset is extracted from the VisDrone2019 MOT dataset, here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot_vehicle.zip).
+ - MOTA is the average MOTA of 4 categories in the VisDrone Vehicle dataset, and this dataset is extracted from the VisDrone2019 MOT dataset, here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot_vehicle.zip).
  - The tracker used in MCFairMOT model here is ByteTracker.
 
 ### MCFairMOT off-line quantization results on VisDrone Vehicle val-set
diff --git a/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py b/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py
index 333b6581a..0be2f1eb8 100644
--- a/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py
+++ b/configs/mot/pedestrian/tools/visdrone/visdrone2mot.py
@@ -58,7 +58,7 @@ def genGtFile(seqPath, outPath, classes=[]):
             class_index = str(classes.index(line[7]) + 1)
             newLine.append(class_index)
         else:
-            newLine.append('1') # use permenant class '1'
+            newLine.append('1') # use permanent class '1'
             newLine.append('1')
         motLine.append(newLine)
     mkdir_if_missing(outPath)
diff --git a/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh b/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh
index 07b488df2..b88b25180 100644
--- a/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh
+++ b/configs/mot/vehicle/tools/bdd100kmot/gen_bdd100kmot_vehicle.sh
@@ -7,7 +7,7 @@ phasetrain=train
 phaseval=val
 classes=2,3,4,9,10
 
-# gen mot dataste
+# gen mot dataset
 python bdd100k2mot.py --data_path=${data_path} --phase=${phasetrain} --classes=${classes} --img_dir=${img_dir} --label_dir=${label_dir} --save_path=${save_path}
 python bdd100k2mot.py --data_path=${data_path} --phase=${phaseval} --classes=${classes} --img_dir=${img_dir} --label_dir=${label_dir} --save_path=${save_path}
 
diff --git a/configs/mot/vehicle/tools/visdrone/visdrone2mot.py b/configs/mot/vehicle/tools/visdrone/visdrone2mot.py
index a165c6625..a2fa20020 100644
--- a/configs/mot/vehicle/tools/visdrone/visdrone2mot.py
+++ b/configs/mot/vehicle/tools/visdrone/visdrone2mot.py
@@ -55,7 +55,7 @@ def genGtFile(seqPath, outPath, classes=[]):
             class_index = str(classes.index(line[7]) + 1)
             newLine.append(class_index)
         else:
-            newLine.append('1') # use permenant class '1'
+            newLine.append('1') # use permanent class '1'
             newLine.append('1')
         motLine.append(newLine)
     mkdir_if_missing(outPath)
diff --git a/configs/ppyolo/README.md b/configs/ppyolo/README.md
index 754fdd434..0dccfe418 100644
--- a/configs/ppyolo/README.md
+++ b/configs/ppyolo/README.md
@@ -60,7 +60,7 @@ PP-YOLO and PP-YOLOv2 improved performance and speed of YOLOv3 with following me
 
 - PP-YOLO is trained on COCO train2017 dataset and evaluated on val2017 & test-dev2017 dataset,Box AP<sup>test</sup> is evaluation results of `mAP(IoU=0.5:0.95)`.
 - PP-YOLO used 8 GPUs for training and mini-batch size as 24 on each GPU, if GPU number and mini-batch size is changed, learning rate and iteration times should be adjusted according [FAQ](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/FAQ).
-- PP-YOLO inference speed is tesed on single Tesla V100 with batch size as 1, CUDA 10.2, CUDNN 7.5.1, TensorRT 5.1.2.2 in TensorRT mode.
+- PP-YOLO inference speed is tested on single Tesla V100 with batch size as 1, CUDA 10.2, CUDNN 7.5.1, TensorRT 5.1.2.2 in TensorRT mode.
 - PP-YOLO FP32 inference speed testing uses inference model exported by `tools/export_model.py` and benchmarked by running `depoly/python/infer.py` with `--run_benchmark`. All testing results do not contains the time cost of data reading and post-processing(NMS), which is same as [YOLOv4(AlexyAB)](https://github.com/AlexeyAB/darknet) in testing method.
 - TensorRT FP16 inference speed testing exclude the time cost of bounding-box decoding(`yolo_box`) part comparing with FP32 testing above, which means that data reading, bounding-box decoding and post-processing(NMS) is excluded(test method same as [YOLOv4(AlexyAB)](https://github.com/AlexeyAB/darknet) too)
 - If you set `--run_benchmark=True`,you should install these dependencies at first, `pip install pynvml psutil GPUtil`.
@@ -176,7 +176,7 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_
 CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_dir=demo
 ```
 
-### 4. Inferece deployment
+### 4. Inference deployment
 
 For inference deployment or benchmard, model exported with `tools/export_model.py` should be used and perform inference with Paddle inference library with following commands:
 
diff --git a/configs/ppyoloe/README.md b/configs/ppyoloe/README.md
index 9b550b77e..53cb4f74d 100644
--- a/configs/ppyoloe/README.md
+++ b/configs/ppyoloe/README.md
@@ -152,7 +152,7 @@ python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c con
 **Notes:**
 - If you need to evaluate while training, please add `--eval`.
 - PP-YOLOE+ supports mixed precision training, please add `--amp`.
-- PaddleDetection supports multi-machine distribued training, you can refer to [DistributedTraining tutorial](../../docs/tutorials/DistributedTraining_en.md).
+- PaddleDetection supports multi-machine distributed training, you can refer to [DistributedTraining tutorial](../../docs/tutorials/DistributedTraining_en.md).
 
 ### Evaluation
 
@@ -259,7 +259,7 @@ trtexec --onnx=./ppyoloe_plus_crn_s_80e_coco.onnx --saveEngine=./ppyoloe_s_bs32.
 
 ### Deployment
 
-PP-YOLOE can be deployed by following approches:
+PP-YOLOE can be deployed by following approaches:
 - Paddle Inference [Python](../../deploy/python) & [C++](../../deploy/cpp)
 - [Paddle-TensorRT](../../deploy/TENSOR_RT.md)
 - [PaddleServing](https://github.com/PaddlePaddle/Serving)
@@ -299,7 +299,7 @@ Model | AP | AP50
 **PP-YOLOE** | **30.5** | **46.4**
 
 **Notes**
- - Here, we use [VisDrone](https://github.com/VisDrone/VisDrone-Dataset) dataset, and to detect 9 objects including `person, bicycles, car, van, truck, tricyle, awning-tricyle, bus, motor`.
+ - Here, we use [VisDrone](https://github.com/VisDrone/VisDrone-Dataset) dataset, and to detect 9 objects including `person, bicycles, car, van, truck, tricycle, awning-tricycle, bus, motor`.
 - Above models trained using official default config, and load pretrained parameters on COCO dataset.
 
 - *Due to the limited time, more verification results will be supplemented in the future. You are also welcome to contribute to PP-YOLOE*
diff --git a/configs/retinanet/README.md b/configs/retinanet/README.md
index cd5f21ebf..1259d47dd 100644
--- a/configs/retinanet/README.md
+++ b/configs/retinanet/README.md
@@ -13,7 +13,7 @@
 **Notes:**
 
 - The ResNet50-FPN are trained on COCO train2017 with 8 GPUs. Both ResNet101-FPN and ResNet50-FPN with [FGD](../slim/distill/README.md) are trained on COCO train2017 with 4 GPUs.
-- All above models are evaludated on val2017. Box AP=`mAP(IoU=0.5:0.95)`.
+- All above models are evaluated on val2017. Box AP=`mAP(IoU=0.5:0.95)`.
 
 ## Citation
 
diff --git a/configs/rotate/tools/generate_result.py b/configs/rotate/tools/generate_result.py
index a103b9d63..f8343ee5b 100644
--- a/configs/rotate/tools/generate_result.py
+++ b/configs/rotate/tools/generate_result.py
@@ -248,7 +248,7 @@ def parse_args():
         '--nms_thresh',
         type=float,
         default=0.1,
-        help='nms threshold whild merging results')
+        help='nms threshold while merging results')
 
     return parser.parse_args()
 
diff --git a/configs/rotate/tools/prepare_data.py b/configs/rotate/tools/prepare_data.py
index 7652edae2..21488e2c7 100644
--- a/configs/rotate/tools/prepare_data.py
+++ b/configs/rotate/tools/prepare_data.py
@@ -74,7 +74,7 @@ def parse_args():
         nargs='+',
         type=float,
         default=[1.],
-        help='scales for multi-sclace training')
+        help='scales for multi-scale training')
     parser.add_argument(
         '--nproc', type=int, default=8, help='the processor number')
 
diff --git a/configs/semi_det/README.md b/configs/semi_det/README.md
index 5e583cbbd..996a1decf 100644
--- a/configs/semi_det/README.md
+++ b/configs/semi_det/README.md
@@ -211,7 +211,7 @@ UnsupTrainDataset:
 ### 预训练配置
 
 ```python
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment another
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000
diff --git a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml
index d1199d2ca..56087d528 100644
--- a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml
+++ b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_full.yml
@@ -8,7 +8,7 @@ epochs: &epochs 24
 
 weights: output/denseteacher_fcos_r50_fpn_coco_full/model_final
 
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment another
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000
diff --git a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml
index 4f1b0ccbe..85afb83bb 100644
--- a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml
+++ b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi005.yml
@@ -8,7 +8,7 @@ epochs: &epochs 240 # 480 will be better
 
 weights: output/denseteacher_fcos_r50_fpn_coco_semi005/model_final
 
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment another
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000
diff --git a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml
index 9ed9246ec..7b99859d9 100644
--- a/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml
+++ b/configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml
@@ -8,7 +8,7 @@ epochs: &epochs 240
 
 weights: output/denseteacher_fcos_r50_fpn_coco_semi010/model_final
 
-### pretrain and warmup config, choose one and coment another
+### pretrain and warmup config, choose one and comment another
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
 semi_start_iters: 5000
 ema_start_iters: 3000
-- 
GitLab