diff --git a/deploy/pipeline/docs/tutorials/PPHuman_QUICK_STARTED.md b/deploy/pipeline/docs/tutorials/PPHuman_QUICK_STARTED.md
index 245db50b6a9d4d58066281fe7c70c69b92f89e80..0e1ddb92c7b9c733b6f7f61679cee35b9edf3450 100644
--- a/deploy/pipeline/docs/tutorials/PPHuman_QUICK_STARTED.md
+++ b/deploy/pipeline/docs/tutorials/PPHuman_QUICK_STARTED.md
@@ -129,6 +129,12 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
 python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --video_dir=mtmct_dir/ --device=gpu [--run_mode trt_fp16]
 ```
 
+For RTSP stream input, replace the video path passed to video_file with an RTSP stream address, for example:
+```
+# Pedestrian attribute recognition: specify the config file path and the test video, and set enable to True in the ATTR section of the config file `deploy/pipeline/config/infer_cfg_pphuman.yml`
+python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o visual=False --video_file=rtsp://[YOUR_RTSP_SITE] --device=gpu [--run_mode trt_fp16]
+```
+
 ### Parameter Description
 
 | Parameter | Required | Meaning |
@@ -137,7 +143,7 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
 | --model_dir | Option | Model paths for each PP-Human task; takes precedence over the config file, e.g. `--model_dir det=better_det/ attr=better_attr/` |
 | --image_file | Option | Image to predict |
 | --image_dir | Option | Path to a folder of images to predict |
-| --video_file | Option | Video to predict |
+| --video_file | Option | Video to predict, or an rtsp stream address |
 | --camera_id | Option | ID of the camera used for prediction, default -1 (no camera; may be set to 0 - (number of cameras - 1)); during prediction, press `q` in the visualization window to quit and save the result to output/output.mp4 |
 | --device | Option | Device to run on, one of `CPU/GPU/XPU`, default `CPU` |
 | --output_dir | Option | Root directory for saving visualization results, default output/ |
diff --git a/deploy/pipeline/docs/tutorials/PPVehicle_QUICK_STARTED.md b/deploy/pipeline/docs/tutorials/PPVehicle_QUICK_STARTED.md
index e559ef3087cb687baddbe011175f22b3705cc583..aa228337ac7614dfc263c3df484e770caee28043 100644
--- a/deploy/pipeline/docs/tutorials/PPVehicle_QUICK_STARTED.md
+++ b/deploy/pipeline/docs/tutorials/PPVehicle_QUICK_STARTED.md
@@ -84,6 +84,12 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppv
 ```
 
+For RTSP stream input, replace the video path passed to video_file with an RTSP stream address, for example:
+```
+# Vehicle attribute recognition: specify the config file path and the test video, and set enable to True in the ATTR section of the config file `deploy/pipeline/config/infer_cfg_ppvehicle.yml`
+python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml -o visual=False --video_file=rtsp://[YOUR_RTSP_SITE] --device=gpu [--run_mode trt_fp16]
+```
+
 ### Parameter Description
 
 | Parameter | Required | Meaning |
@@ -92,7 +98,7 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppv
 | --model_dir | Option | Model paths for each task; takes precedence over the config file, e.g. `--model_dir det=better_det/ attr=better_attr/` |
 | --image_file | Option | Image to predict |
 | --image_dir | Option | Path to a folder of images to predict |
-| --video_file | Option | Video to predict |
+| --video_file | Option | Video to predict, or an rtsp stream address |
 | --camera_id | Option | ID of the camera used for prediction, default -1 (no camera; may be set to 0 - (number of cameras - 1)); during prediction, press `q` in the visualization window to quit and save the result to output/output.mp4 |
 | --device | Option | Device to run on, one of `CPU/GPU/XPU`, default `CPU` |
 | --output_dir | Option | Root directory for saving visualization results, default output/ |
diff --git a/deploy/pipeline/pipeline.py b/deploy/pipeline/pipeline.py
index fe3654dae23620a4f40b25ff7431ed34243e40a7..7683da175b86dfbe429063b28c2438b7aa0e7ad2 100644
--- a/deploy/pipeline/pipeline.py
+++ b/deploy/pipeline/pipeline.py
@@ -130,7 +130,9 @@ class Pipeline(object):
             self.multi_camera = False
 
         elif video_file is not None:
-            assert os.path.exists(video_file), "video_file not exists."
+            assert os.path.exists(
+                video_file
+            ) or 'rtsp' in video_file, "video_file does not exist and is not an rtsp stream."
             self.multi_camera = False
             input = video_file
             self.is_video = True
@@ -659,6 +661,8 @@ class PipePredictor(object):
         # mot -> pose -> action
         capture = cv2.VideoCapture(video_file)
         video_out_name = 'output.mp4' if self.file_name is None else self.file_name
+        if "rtsp" in video_file:
+            video_out_name = os.path.splitext(video_out_name)[0] + "_rtsp.mp4"
 
         # Get Video info : resolution, fps, frame count
         width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
@@ -767,8 +771,10 @@ class PipePredictor(object):
             if self.with_vehicleplate:
                 if frame_id > self.warmup_frame:
                     self.pipe_timer.module_time['vehicleplate'].start()
+                plate_input, _, _ = crop_image_with_mot(
+                    frame_rgb, mot_res, expand=False)
                 platelicense = self.vehicleplate_detector.get_platelicense(
-                    crop_input)
+                    plate_input)
                 if frame_id > self.warmup_frame:
                     self.pipe_timer.module_time['vehicleplate'].end()
                 self.pipeline_res.update(platelicense, 'vehicleplate')
diff --git a/deploy/pipeline/ppvehicle/vehicle_plate.py b/deploy/pipeline/ppvehicle/vehicle_plate.py
index 42e5ba4b0a1cb1bfcd7fb1422cf8b700d904294c..6a41be6f1ecd92da842cc7006bedad3e372ae92c 100644
--- a/deploy/pipeline/ppvehicle/vehicle_plate.py
+++ b/deploy/pipeline/ppvehicle/vehicle_plate.py
@@ -268,7 +268,7 @@ class PlateRecognizer(object):
         platelicense = ""
         for text_info in text_pcar:
             text = text_info[0][0][0]
-            if len(text) > 2 and text[0] in simcode and len(text) < 10:
+            if len(text) > 2 and len(text) < 10:
                 platelicense = text
         plate_all["plate"].append(platelicense)
         return plate_all
diff --git a/deploy/python/visualize.py b/deploy/python/visualize.py
index 24aa40796f7ef9ae4d55df46f9a6223119183bec..626da02555985a5568f3bdaff20705d8a8dd1c11 100644
--- a/deploy/python/visualize.py
+++ b/deploy/python/visualize.py
@@ -418,7 +418,7 @@ def visualize_vehicleplate(im, results, boxes=None):
     im = np.ascontiguousarray(np.copy(im))
     im_h, im_w = im.shape[:2]
 
-    text_scale = max(0.5, im.shape[0] / 3000.)
+    text_scale = max(1.0, im.shape[0] / 1600.)
     text_thickness = 1
     line_inter = im.shape[0] / 40.
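
Taken together, the pipeline.py changes mean a `--video_file` argument is accepted either as an existing local file or as any string containing `rtsp`, and RTSP sources get a distinct output name. A minimal standalone sketch of those two behaviors, for illustration only (the function names `is_valid_video_source` and `output_name_for` are not part of the patch):

```python
import os

def is_valid_video_source(video_file):
    # Mirrors the relaxed assert in Pipeline: a local file must exist on
    # disk, while any string containing "rtsp" is treated as a live stream
    # URL and passed straight to cv2.VideoCapture, which handles the
    # network connection itself.
    return os.path.exists(video_file) or 'rtsp' in video_file

def output_name_for(video_file, file_name=None):
    # Mirrors the output naming in PipePredictor: RTSP sources get an
    # "_rtsp" suffix so saved results are distinguishable from file inputs.
    video_out_name = 'output.mp4' if file_name is None else file_name
    if "rtsp" in video_file:
        video_out_name = os.path.splitext(video_out_name)[0] + "_rtsp.mp4"
    return video_out_name

assert is_valid_video_source('rtsp://192.168.0.10:554/stream')  # stream URL accepted
assert not is_valid_video_source('no_such_video.mp4')           # missing local file rejected
assert output_name_for('rtsp://192.168.0.10:554/stream') == 'output_rtsp.mp4'
```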