未验证 提交 7e8d1fa2 编写于 作者: Z zhiboniu 提交者: GitHub

multi_thread & multi_rtsp input (#6834)

* multi_thread & multi_rtsp input; test=document_fix

* update rtsp docs; test=document_fix

* update savename,add threadid to name;test=document_fix
上级 2f0bfdb0
......@@ -69,6 +69,12 @@ def argsparser():
type=str,
default=None,
help="Dir of video file, `video_file` has a higher priority.")
parser.add_argument(
"--rtsp",
type=str,
nargs='+',
default=None,
help="list of rtsp inputs, for one or multiple rtsp input.")
parser.add_argument(
"--camera_id",
type=int,
......
......@@ -123,10 +123,13 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o SKELETON_ACTION.enable=True --video_file=test_video.mp4 --device=gpu
```
3. 对rtsp流的支持,video_file后面的视频地址更换为rtsp流地址,示例如下:
3. 对rtsp流的支持,使用--rtsp RTSP [RTSP ...]参数指定一路或者多路rtsp视频流,如果是多路输入,地址之间用空格隔开。(或者将video_file后面的视频地址直接更换为rtsp流地址),示例如下:
```
# 例:行人属性识别,指定配置文件路径和测试视频
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml -o visual=False --video_file=rtsp://[YOUR_RTSP_SITE] --device=gpu
# 例:行人属性识别,单路视频流
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE] --device=gpu
# 例:行人属性识别,多路视频流
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE1] rtsp://[YOUR_RTSP_SITE2] --device=gpu
```
### Jetson部署说明
......@@ -149,7 +152,8 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infe
| -o | Option | 覆盖配置文件中对应的配置 |
| --image_file | Option | 需要预测的图片 |
| --image_dir | Option | 要预测的图片文件夹路径 |
| --video_file | Option | 需要预测的视频,或者rtsp流地址 |
| --video_file | Option | 需要预测的视频,或者rtsp流地址(推荐使用rtsp参数) |
| --rtsp | Option | rtsp视频流地址,支持一路或者多路同时输入 |
| --camera_id | Option | 用来预测的摄像头ID,默认为-1(表示不使用摄像头预测,可设置为:0 - (摄像头数目-1) ),预测过程中在可视化界面按`q`退出输出预测结果到:output/output.mp4|
| --device | Option | 运行时的设备,可选择`CPU/GPU/XPU`,默认为`CPU`|
| --output_dir | Option|可视化结果保存的根目录,默认为output/|
......
......@@ -129,10 +129,13 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infe
```
3. 对rtsp流的支持,video_file后面的视频地址更换为rtsp流地址,示例如下:
3. 对rtsp流的支持,使用--rtsp RTSP [RTSP ...]参数指定一路或者多路rtsp视频流,如果是多路输入,地址之间用空格隔开。(或者将video_file后面的视频地址直接更换为rtsp流地址),示例如下:
```
# 例:车辆属性识别,指定配置文件路径和测试视频
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_vehicle_attr.yml -o visual=False --video_file=rtsp://[YOUR_RTSP_SITE] --device=gpu
# 例:车辆属性识别,单路视频流
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_vehicle_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE] --device=gpu
# 例:车辆属性识别,多路视频流
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_vehicle_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE1] rtsp://[YOUR_RTSP_SITE2] --device=gpu
```
### Jetson部署说明
......@@ -156,6 +159,7 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infe
| --image_file | Option | 需要预测的图片 |
| --image_dir | Option | 要预测的图片文件夹路径 |
| --video_file | Option | 需要预测的视频,或者rtsp流地址 |
| --rtsp | Option | rtsp视频流地址,支持一路或者多路同时输入 |
| --camera_id | Option | 用来预测的摄像头ID,默认为-1(表示不使用摄像头预测,可设置为:0 - (摄像头数目-1) ),预测过程中在可视化界面按`q`退出输出预测结果到:output/output.mp4|
| --device | Option | 运行时的设备,可选择`CPU/GPU/XPU`,默认为`CPU`|
| --output_dir | Option|可视化结果保存的根目录,默认为output/|
......
......@@ -73,7 +73,7 @@ class Pipeline(object):
self.vis_result = cfg['visual']
self.input = self._parse_input(args.image_file, args.image_dir,
args.video_file, args.video_dir,
args.camera_id)
args.camera_id, args.rtsp)
if self.multi_camera:
self.predictor = []
for name in self.input:
......@@ -85,10 +85,10 @@ class Pipeline(object):
else:
self.predictor = PipePredictor(args, cfg, self.is_video)
if self.is_video:
self.predictor.set_file_name(args.video_file)
self.predictor.set_file_name(self.input)
def _parse_input(self, image_file, image_dir, video_file, video_dir,
camera_id):
camera_id, rtsp):
# parse input as is_video and multi_camera
......@@ -115,6 +115,16 @@ class Pipeline(object):
input = videof[0]
self.is_video = True
elif rtsp is not None:
if len(rtsp) > 1:
rtsp = [rtsp_item for rtsp_item in rtsp if 'rtsp' in rtsp_item]
self.multi_camera = True
input = rtsp
else:
self.multi_camera = False
input = rtsp[0]
self.is_video = True
elif camera_id != -1:
self.multi_camera = False
input = camera_id
......@@ -127,6 +137,37 @@ class Pipeline(object):
return input
def run_multithreads(self):
    """Run the pipeline, using one worker thread per input stream.

    Multi-camera mode: starts one thread per (predictor, input) pair,
    joins them all, gathers each predictor's collected results, and
    optionally runs cross-camera MTMCT post-processing.
    Single-input mode: runs the lone predictor synchronously.

    NOTE(review): assumes self.predictor is a list aligned with
    self.input when self.multi_camera is True — presumably set up in
    __init__; confirm against the constructor.
    """
    # Local import keeps threading out of module scope for callers
    # that never use multi-threaded execution.
    import threading
    if self.multi_camera:
        multi_res = []
        threads = []
        # One thread per stream; the zero-padded name doubles as a
        # stable per-stream identifier (also passed as thread_idx).
        for idx, (predictor,
                  input) in enumerate(zip(self.predictor, self.input)):
            thread = threading.Thread(
                name=str(idx).zfill(3),
                target=predictor.run,
                args=(input, idx))
            threads.append(thread)
        for thread in threads:
            thread.start()
        # Join in submission order; each predictor's result is read
        # only after its own thread has finished, so the read is safe
        # even though later threads may still be running.
        for predictor, thread in zip(self.predictor, threads):
            thread.join()
            collector_data = predictor.get_result()
            multi_res.append(collector_data)
        # Multi-target multi-camera tracking across all streams.
        if self.enable_mtmct:
            mtmct_process(
                multi_res,
                self.input,
                mtmct_vis=self.vis_result,
                output_dir=self.output_dir)
    else:
        # Single input: self.predictor is a lone PipePredictor here.
        self.predictor.run(self.input)
def run(self):
if self.multi_camera:
multi_res = []
......@@ -437,9 +478,9 @@ class PipePredictor(object):
def get_result(self):
    """Return the results accumulated by this predictor's collector."""
    collected = self.collector.get_res()
    return collected
def run(self, input):
def run(self, input, thread_idx=0):
if self.is_video:
self.predict_video(input)
self.predict_video(input, thread_idx=thread_idx)
else:
self.predict_image(input)
self.pipe_timer.info()
......@@ -525,14 +566,15 @@ class PipePredictor(object):
if self.cfg['visual']:
self.visualize_image(batch_file, batch_input, self.pipeline_res)
def predict_video(self, video_file):
def predict_video(self, video_file, thread_idx=0):
# mot
# mot -> attr
# mot -> pose -> action
capture = cv2.VideoCapture(video_file)
video_out_name = 'output.mp4' if self.file_name is None else self.file_name
if "rtsp" in video_file:
video_out_name = video_out_name + "_rtsp.mp4"
video_out_name = video_out_name + "_t" + str(thread_idx).zfill(
2) + "_rtsp.mp4"
# Get Video info : resolution, fps, frame count
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
......@@ -593,7 +635,7 @@ class PipePredictor(object):
while (1):
if frame_id % 10 == 0:
print('frame id: ', frame_id)
print('Thread: {}; frame id: {}'.format(thread_idx, frame_id))
ret, frame = capture.read()
if not ret:
......@@ -621,6 +663,10 @@ class PipePredictor(object):
self.pipe_timer.module_time['mot'].end()
self.pipe_timer.track_num += len(mot_res['boxes'])
if frame_id % 10 == 0:
print("Thread: {}; trackid number: {}".format(
thread_idx, len(mot_res['boxes'])))
# flow_statistic only support single class MOT
boxes, scores, ids = res[0] # batch size = 1 in MOT
mot_result = (frame_id + 1, boxes[0], scores[0],
......@@ -1021,7 +1067,8 @@ def main():
print_arguments(cfg)
pipeline = Pipeline(FLAGS, cfg)
pipeline.run()
# pipeline.run()
pipeline.run_multithreads()
if __name__ == '__main__':
......
......@@ -325,7 +325,7 @@ def flow_statistic(result,
info += ", Count during {} secs: {}".format(secs_interval,
curr_interval_count)
interval_id_set.clear()
print(info)
# print(info)
info += "\n"
records.append(info)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册