diff --git a/deploy/pphuman/README.md b/deploy/pphuman/README.md index f5d324e7df4ec9f701323b0dc1db24b254333dbb..dbe3491c8934eac06acb20edff80ec3537f599f6 100644 --- a/deploy/pphuman/README.md +++ b/deploy/pphuman/README.md @@ -29,7 +29,8 @@ cd PaddleDetection pip install -r requirements.txt ``` -详细安装文档参考[文档](../../docs/tutorials/INSTALL_cn.md) +1. 详细安装文档参考[文档](../../docs/tutorials/INSTALL_cn.md) +2. 如果需要TensorRT推理加速(测速方式),请安装带`TensorRT`的Paddle版本。您可以从[Paddle安装包](https://paddleinference.paddlepaddle.org.cn/v2.2/user_guides/download_lib.html#python)下载安装,或者按照[指导文档](https://www.paddlepaddle.org.cn/inference/master/optimize/paddle_trt.html)使用docker或自编译方式准备Paddle环境。 ## 二、快速开始 @@ -93,21 +94,23 @@ ATTR: ``` # 行人检测,指定配置文件路径和测试图片 -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --image_file=test_image.jpg --device=gpu +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --image_file=test_image.jpg --device=gpu [--run_mode trt_fp16] # 行人跟踪,指定配置文件路径和测试视频 -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu [--run_mode trt_fp16] # 行人跟踪,指定配置文件路径,模型路径和测试视频 # 命令行中指定的模型路径优先级高于配置文件 -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --model_dir det=ppyoloe/ +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --model_dir det=ppyoloe/ [--run_mode trt_fp16] # 行人属性识别,指定配置文件路径和测试视频 -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_attr=True +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_attr=True [--run_mode trt_fp16] # 行为识别,指定配置文件路径和测试视频 -python 
deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_action=True +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_action=True [--run_mode trt_fp16] +# 行人跨境跟踪,指定配置文件路径和测试视频列表文件夹 +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_dir=mtmct_dir/ --device=gpu [--run_mode trt_fp16] ``` 其他用法请参考[子任务文档](./docs) diff --git a/deploy/pphuman/README_en.md b/deploy/pphuman/README_en.md index b3531f0afd1b6816b31fda1313c5e542dab7c7d1..984a602999f81f48ed3a03a48b571313e811187b 100644 --- a/deploy/pphuman/README_en.md +++ b/deploy/pphuman/README_en.md @@ -28,7 +28,8 @@ cd PaddleDetection pip install -r requirements.txt ``` -For details of the installation, please refer to this [document](../../docs/tutorials/INSTALL.md) +1. For details of the installation, please refer to this [document](../../docs/tutorials/INSTALL.md) +2. Please install `Paddle-TensorRT` if you want to speed up inference with TensorRT. You can download the whl package from [Paddle-whl-list](https://paddleinference.paddlepaddle.org.cn/v2.2/user_guides/download_lib.html#python), or prepare the environment by yourself following the [Install-Guide](https://www.paddlepaddle.org.cn/inference/master/optimize/paddle_trt.html). ## II. Quick Start @@ -91,23 +92,23 @@ ATTR: ``` # Pedestrian detection. Specify the config file path and test images -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --image_file=test_image.jpg --device=gpu +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --image_file=test_image.jpg --device=gpu [--run_mode trt_fp16] # Pedestrian tracking. 
Specify the config file path and test videos -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu [--run_mode trt_fp16] # Pedestrian tracking. Specify the config file path, the model path and test videos # The model path specified on the command line prioritizes over the config file -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --model_dir det=ppyoloe/ +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --model_dir det=ppyoloe/ [--run_mode trt_fp16] # Attribute recognition. Specify the config file path and test videos -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_attr=True +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_attr=True [--run_mode trt_fp16] # Action Recognition. Specify the config file path and test videos -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_action=True +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_file=test_video.mp4 --device=gpu --enable_action=True [--run_mode trt_fp16] -# Multi-Camera pedestrian tracking. Specify the config file path and test videos -python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_dir=test_video_dir/ --device=gpu +# Pedestrian Multi-Target Multi-Camera tracking. 
Specify the config file path and the directory of test videos +python deploy/pphuman/pipeline.py --config deploy/pphuman/config/infer_cfg.yml --video_dir=mtmct_dir/ --device=gpu [--run_mode trt_fp16] ``` diff --git a/deploy/pphuman/mtmct.py b/deploy/pphuman/mtmct.py index 5e0abbd9d0c7be69120cac04b3c5794d9bb9c436..5c7494edf1327955e045a8c82a2458bbda0a23de 100644 --- a/deploy/pphuman/mtmct.py +++ b/deploy/pphuman/mtmct.py @@ -113,8 +113,8 @@ def save_mtmct_vis_results(camera_results, captures, output_dir): cid = camera_ids[idx] basename = os.path.basename(video_file) video_out_name = "vis_" + basename - print("Start visualizing output video: {}".format(video_out_name)) out_path = os.path.join(save_dir, video_out_name) + print("Start visualizing output video: {}".format(out_path)) # Get Video info : resolution, fps, frame count width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) diff --git a/deploy/pphuman/pipeline.py b/deploy/pphuman/pipeline.py index a9543357107cb48a6e7d4c3fbfdc88a8e11ff49e..8078ada54b38c0bd6d0f9ab22221bbbc13668836 100644 --- a/deploy/pphuman/pipeline.py +++ b/deploy/pphuman/pipeline.py @@ -109,8 +109,9 @@ class Pipeline(object): self.input = self._parse_input(image_file, image_dir, video_file, video_dir, camera_id) if self.multi_camera: - self.predictor = [ - PipePredictor( + self.predictor = [] + for name in self.input: + predictor_item = PipePredictor( cfg, is_video=True, multi_camera=True, @@ -123,8 +124,10 @@ class Pipeline(object): trt_opt_shape=trt_opt_shape, cpu_threads=cpu_threads, enable_mkldnn=enable_mkldnn, - output_dir=output_dir) for i in self.input - ] + output_dir=output_dir) + predictor_item.set_file_name(name) + self.predictor.append(predictor_item) + else: self.predictor = PipePredictor( cfg,