From f0a5a4b7952939cb2a926e35e617fe9cadf69f18 Mon Sep 17 00:00:00 2001
From: Feng Ni
Date: Thu, 12 May 2022 12:37:05 +0800
Subject: [PATCH] [cherry-pick] fix pphuman visualize_video (#5949)

---
 configs/mot/bytetrack/README_cn.md |  2 +-
 deploy/pphuman/docs/mot.md         |  4 ++--
 deploy/pphuman/pipeline.py         |  5 +++--
 deploy/pptracking/python/README.md | 12 ++++++------
 4 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/configs/mot/bytetrack/README_cn.md b/configs/mot/bytetrack/README_cn.md
index 242319c08..477e72e68 100644
--- a/configs/mot/bytetrack/README_cn.md
+++ b/configs/mot/bytetrack/README_cn.md
@@ -86,7 +86,7 @@ CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/deepsort/reid
 ### 4. Predict with the exported model using Python
 
 ```bash
-python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --tracker_config=tracker_config.yml --video_file={your video name}.mp4 --device=GPU --scaled=True --save_mot_txts
+python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --tracker_config=deploy/pptracking/python/tracker_config.yml --video_file={your video name}.mp4 --device=GPU --scaled=True --save_mot_txts
 ```
 **Note:**
 - The tracking model runs on videos; single-image prediction is not supported. By default it saves the video with visualized tracking results. Add `--save_mot_txts` (one txt per video) or `--save_mot_txt_per_img` (one txt per image) to save the tracking results as txt files, or `--save_images` to save the visualized result images.
diff --git a/deploy/pphuman/docs/mot.md b/deploy/pphuman/docs/mot.md
index 0552c4df2..acfed453a 100644
--- a/deploy/pphuman/docs/mot.md
+++ b/deploy/pphuman/docs/mot.md
@@ -34,9 +34,9 @@ python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml \
 python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml \
                                   --video_file=test_video.mp4 \
                                   --device=gpu \
-                                  --model_dir det=ppyoloe/ --do_entrance_counting \
-                                  --draw_center_traj
+                                  --draw_center_traj \
+                                  --model_dir det=ppyoloe/
 ```
 
 **Note:**
diff --git a/deploy/pphuman/pipeline.py b/deploy/pphuman/pipeline.py
index 8078ada54..0e6796882 100644
--- a/deploy/pphuman/pipeline.py
+++ b/deploy/pphuman/pipeline.py
@@ -537,8 +537,9 @@ class PipePredictor(object):
             self.pipe_timer.total_time.end()
             if self.cfg['visual']:
                 _, _, fps = self.pipe_timer.get_total_time()
-                im = self.visualize_video(frame, mot_res, frame_id,
-                                          fps)  # visualize
+                im = self.visualize_video(frame, mot_res, frame_id, fps,
+                                          entrance, records,
+                                          center_traj)  # visualize
                 writer.write(im)
                 if self.file_name is None:  # use camera_id
                     cv2.imshow('PPHuman', im)
diff --git a/deploy/pptracking/python/README.md b/deploy/pptracking/python/README.md
index d5c34cdf5..46ff59a31 100644
--- a/deploy/pptracking/python/README.md
+++ b/deploy/pptracking/python/README.md
@@ -89,9 +89,9 @@ CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/deepsort/reid
 wget https://bj.bcebos.com/v1/paddledet/data/mot/demo/mot17_demo.mp4
 
 # Use the exported PPYOLOv2 pedestrian detection model and the PPLCNet ReID model
-python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyolov2_r50vd_dcn_365e_640x640_mot17half/ --reid_model_dir=output_inference/deepsort_pplcnet/ --tracker_config=tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
+python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyolov2_r50vd_dcn_365e_640x640_mot17half/ --reid_model_dir=output_inference/deepsort_pplcnet/ --tracker_config=deploy/pptracking/python/tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
 
 # Or use the exported PPYOLOe pedestrian detection model and the PPLCNet ReID model
-python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --reid_model_dir=output_inference/deepsort_pplcnet/ --tracker_config=tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
+python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --reid_model_dir=output_inference/deepsort_pplcnet/ --tracker_config=deploy/pptracking/python/tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
 ```
 
 ### 2.3 Predict vehicle tracking with the exported models using Python
@@ -108,10 +108,10 @@ wget https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pplcnet_vehicl
 tar -xvf deepsort_pplcnet_vehicle.tar
 
 # Use the exported PicoDet vehicle detection model and the PPLCNet vehicle ReID model
-python deploy/pptracking/python/mot_sde_infer.py --model_dir=picodet_l_640_aic21mtmct_vehicle/ --reid_model_dir=deepsort_pplcnet_vehicle/ --tracker_config=tracker_config.yml --device=GPU --threshold=0.5 --video_file={your video}.mp4 --save_mot_txts --save_images
+python deploy/pptracking/python/mot_sde_infer.py --model_dir=picodet_l_640_aic21mtmct_vehicle/ --reid_model_dir=deepsort_pplcnet_vehicle/ --tracker_config=deploy/pptracking/python/tracker_config.yml --device=GPU --threshold=0.5 --video_file={your video}.mp4 --save_mot_txts --save_images
 
 # Use the exported PP-YOLOv2 vehicle detection model and the PPLCNet vehicle ReID model
-python deploy/pptracking/python/mot_sde_infer.py --model_dir=ppyolov2_r50vd_dcn_365e_aic21mtmct_vehicle/ --reid_model_dir=deepsort_pplcnet_vehicle/ --tracker_config=tracker_config.yml --device=GPU --threshold=0.5 --video_file={your video}.mp4 --save_mot_txts --save_images
+python deploy/pptracking/python/mot_sde_infer.py --model_dir=ppyolov2_r50vd_dcn_365e_aic21mtmct_vehicle/ --reid_model_dir=deepsort_pplcnet_vehicle/ --tracker_config=deploy/pptracking/python/tracker_config.yml --device=GPU --threshold=0.5 --video_file={your video}.mp4 --save_mot_txts --save_images
 ```
 
 **Note:**
@@ -135,10 +135,10 @@ CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/deepsort/dete
 wget https://bj.bcebos.com/v1/paddledet/data/mot/demo/mot17_demo.mp4
 
 # Use the exported PPYOLOe pedestrian detection model
-python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --tracker_config=tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
+python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --tracker_config=deploy/pptracking/python/tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
 
 # Use the exported PPYOLOe pedestrian detection model and the PPLCNet ReID model
-python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --reid_model_dir=output_inference/deepsort_pplcnet/ --tracker_config=tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
+python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half/ --reid_model_dir=output_inference/deepsort_pplcnet/ --tracker_config=deploy/pptracking/python/tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5 --save_mot_txts --save_images
 ```
 **Note:**
 - The ByteTrack model runs by loading an exported detector together with a separately configured `--tracker_config` file; real-time tracking does not require a ReID model. `--reid_model_dir` is the path to an exported ReID model and is empty by default; whether to add it depends on the tracking quality you need.
--
GitLab
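The core of this patch is the `deploy/pphuman/pipeline.py` hunk: in the video/camera visualization branch, `visualize_video` was called with only `frame`, `mot_res`, `frame_id`, and `fps`, so the `entrance`, `records`, and `center_traj` data used by `--do_entrance_counting` and `--draw_center_traj` never reached the drawing step; the fix threads those three arguments through the call. The sketch below illustrates that kind of drawing routine only; it is not PaddleDetection's implementation, and the function name `visualize_video_sketch` plus the assumed data layouts (`mot_boxes` as `(track_id, (x1, y1, x2, y2))` tuples, `records` as strings, `center_traj` as a dict of point lists) are hypothetical.

```python
import cv2
import numpy as np


def visualize_video_sketch(frame, mot_boxes, frame_id, fps,
                           entrance=None, records=None, center_traj=None):
    """Draw tracking boxes and, when provided, the entrance line,
    the latest counting record, and accumulated center trajectories."""
    im = frame.copy()
    for track_id, (x1, y1, x2, y2) in mot_boxes:  # simplified MOT results
        cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
        cv2.putText(im, str(track_id), (int(x1), int(y1) - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        if center_traj is not None:  # accumulate box centers per track id
            center_traj.setdefault(track_id, []).append(
                (int((x1 + x2) / 2), int((y1 + y2) / 2)))

    if entrance is not None:  # entrance line given as ((x1, y1), (x2, y2))
        cv2.line(im, entrance[0], entrance[1], (0, 0, 255), 2)
    if records:  # show the most recent counting record as text
        cv2.putText(im, records[-1], (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
    if center_traj:  # draw every stored trajectory point
        for points in center_traj.values():
            for pt in points:
                cv2.circle(im, pt, 3, (255, 0, 0), -1)
    cv2.putText(im, 'frame: %d fps: %.1f' % (frame_id, fps), (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
    return im


if __name__ == '__main__':
    traj = {}  # kept by the caller so trajectories grow frame by frame
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    out = visualize_video_sketch(frame, [(1, (100, 100, 200, 300))], 0, 25.0,
                                 entrance=((0, 240), (639, 240)),
                                 records=['Total count: 1'],
                                 center_traj=traj)
    print(out.shape, traj)
```

Passing `center_traj` in as a caller-owned dict mirrors why the patched call site has to hand it to the visualizer: the per-track history accumulates across frames outside the drawing function, so the function can only plot it if it receives it on every call.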