Unverified commit c308f983 authored by XYZ_916, committed by GitHub

Develop: Ease of use optimization (#6569)

* 1. add -o command for pipeline;
2. remove basemode config;
3. add example configs for pphuman.

* copyright to Copyright

* 1. update docs for quick start;
2. delete merge_model_dir;
3. move cfg codes to cfg_utils.py

* Update QUICK_STARTED.md

* Update PPVehicle_QUICK_STARTED.md

* Update pphuman_action.md

* Update pphuman_action_en.md

* Update pphuman_attribute.md

* Update pphuman_attribute_en.md

* Update cfg_utils.py
Parent 83f6e924
import ast
import copy

import yaml
from argparse import ArgumentParser, RawDescriptionHelpFormatter


class ArgsParser(ArgumentParser):
    def __init__(self):
        super(ArgsParser, self).__init__(
            formatter_class=RawDescriptionHelpFormatter)
        self.add_argument(
            "-o", "--opt", nargs='*', help="set configuration options")

    def parse_args(self, argv=None):
        args = super(ArgsParser, self).parse_args(argv)
        assert args.config is not None, \
            "Please specify --config=configure_file_path."
        args.opt = self._parse_opt(args.opt)
        return args

    def _parse_opt(self, opts):
        # Turn ["MOT.batch_size=3", "visual=False"] into nested dicts:
        # {'MOT': {'batch_size': 3}, 'visual': False}
        config = {}
        if not opts:
            return config
        for s in opts:
            s = s.strip()
            k, v = s.split('=', 1)
            if '.' not in k:
                config[k] = yaml.load(v, Loader=yaml.Loader)
            else:
                keys = k.split('.')
                if keys[0] not in config:
                    config[keys[0]] = {}
                cur = config[keys[0]]
                for idx, key in enumerate(keys[1:]):
                    if idx == len(keys) - 2:
                        # last component: parse the value with YAML semantics
                        cur[key] = yaml.load(v, Loader=yaml.Loader)
                    else:
                        # setdefault so options sharing a prefix
                        # (e.g. A.B.x and A.B.y) do not clobber each other
                        cur.setdefault(key, {})
                        cur = cur[key]
        return config
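A quick sketch of what `_parse_opt` produces (values here are illustrative):

```
# Illustrative only: expand two "-o" override strings by hand.
p = ArgsParser()
print(p._parse_opt(["MOT.batch_size=3", "visual=False"]))
# -> {'MOT': {'batch_size': 3}, 'visual': False}
```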
def argsparser():
parser = ArgsParser()
parser.add_argument(
"--config",
type=str,
default=None,
help=("Path of configure"),
required=True)
parser.add_argument(
"--image_file", type=str, default=None, help="Path of image file.")
parser.add_argument(
"--image_dir",
type=str,
default=None,
help="Dir of image file, `image_file` has a higher priority.")
parser.add_argument(
"--video_file",
type=str,
default=None,
help="Path of video file, `video_file` or `camera_id` has a highest priority."
)
parser.add_argument(
"--video_dir",
type=str,
default=None,
help="Dir of video file, `video_file` has a higher priority.")
parser.add_argument(
"--camera_id",
type=int,
default=-1,
help="device id of camera to predict.")
parser.add_argument(
"--output_dir",
type=str,
default="output",
help="Directory of output visualization files.")
parser.add_argument(
"--run_mode",
type=str,
default='paddle',
help="mode of running(paddle/trt_fp32/trt_fp16/trt_int8)")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU."
)
parser.add_argument(
"--enable_mkldnn",
type=ast.literal_eval,
default=False,
help="Whether use mkldnn with CPU.")
parser.add_argument(
"--cpu_threads", type=int, default=1, help="Num of threads with CPU.")
parser.add_argument(
"--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.")
parser.add_argument(
"--trt_max_shape",
type=int,
default=1280,
help="max_shape for TensorRT.")
parser.add_argument(
"--trt_opt_shape",
type=int,
default=640,
help="opt_shape for TensorRT.")
parser.add_argument(
"--trt_calib_mode",
type=ast.literal_eval,  # type=bool would treat any non-empty string as True
default=False,
help="If the model is produced by TRT offline quantitative "
"calibration, trt_calib_mode needs to be set to True.")
parser.add_argument(
"--do_entrance_counting",
action='store_true',
help="Whether to count the number of identifiers entering "
"or exiting the entrance. Note that only single-class MOT is supported."
)
parser.add_argument(
"--do_break_in_counting",
action='store_true',
help="Whether to count the number of identifiers breaking into "
"the area. Note that only single-class MOT is supported and "
"the video should be taken by a static camera.")
parser.add_argument(
"--region_type",
type=str,
default='horizontal',
help="Area type for entrance counting or break-in counting: 'horizontal' and "
"'vertical' are used for entrance counting, 'custom' for break-in counting. "
"Note that only single-class MOT is supported, and the video should be taken by a static camera."
)
parser.add_argument(
'--region_polygon',
nargs='+',
type=int,
default=[],
help="Clockwise point coordinates (x0,y0,x1,y1,...) of the area polygon used for "
"do_break_in_counting. Note that only single-class MOT is supported and "
"the video should be taken by a static camera.")
parser.add_argument(
"--secs_interval",
type=int,
default=2,
help="The seconds interval to count after tracking")
parser.add_argument(
"--draw_center_traj",
action='store_true',
help="Whether drawing the trajectory of center")
return parser
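End to end, the parser behaves as sketched below (paths are placeholders; `parse_args` asserts that `--config` is present):

```
# Placeholder arguments; illustrative only.
parser = argsparser()
FLAGS = parser.parse_args([
    "--config", "deploy/pipeline/config/infer_cfg_pphuman.yml",
    "--device", "gpu",
    "-o", "ATTR.enable=True",
])
print(FLAGS.device, FLAGS.opt)  # gpu {'ATTR': {'enable': True}}
```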
def merge_cfg(args):
    # load the YAML config given by --config
    with open(args.config) as f:
        pred_config = yaml.safe_load(f)

    def merge(cfg, arg):
        # overwrite keys (recursively, for nested dicts) that also
        # appear as command-line arguments
        merge_cfg = copy.deepcopy(cfg)
        for k, v in cfg.items():
            if k in arg:
                merge_cfg[k] = arg[k]
            else:
                if isinstance(v, dict):
                    merge_cfg[k] = merge(v, arg)
        return merge_cfg

    def merge_opt(cfg, arg):
        merge_cfg = copy.deepcopy(cfg)
        # apply -o overrides, e.g. {'MOT': {'batch_size': 3}}
        if 'opt' in arg.keys() and arg['opt']:
            for name, value in arg['opt'].items():
                if name not in merge_cfg.keys():
                    print("No", name, "in config file!")
                    continue
                for sub_k, sub_v in value.items():
                    if sub_k not in merge_cfg[name].keys():
                        print("No", sub_k, "in config file of", name, "!")
                        continue
                    merge_cfg[name][sub_k] = sub_v
        return merge_cfg

    args_dict = vars(args)
    pred_config = merge(pred_config, args_dict)
    pred_config = merge_opt(pred_config, args_dict)
    return pred_config


def print_arguments(cfg):
    print('----------- Running Arguments -----------')
    print(yaml.dump(cfg))
    print('------------------------------------------')
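To make the precedence concrete, here is a self-contained sketch (the file contents and values are invented for illustration): directly passed flags overwrite matching keys anywhere in the config, and `-o` entries patch sub-keys of a named section.

```
import argparse
import tempfile

# A throwaway config file standing in for infer_cfg_pphuman.yml.
cfg_text = "visual: True\nMOT:\n  model_dir: old_mot/\n  batch_size: 1\n"
with tempfile.NamedTemporaryFile("w", suffix=".yml", delete=False) as f:
    f.write(cfg_text)
    cfg_path = f.name

# Namespace mimicking parsed flags: visual=False from a direct flag, plus
# "-o MOT.batch_size=3" already expanded by ArgsParser._parse_opt.
args = argparse.Namespace(
    config=cfg_path,
    visual=False,
    opt={'MOT': {'batch_size': 3}})

print(merge_cfg(args))
# -> {'visual': False, 'MOT': {'model_dir': 'old_mot/', 'batch_size': 3}}
```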
# Example config (calling recognition: MOT + ID_BASED_CLSACTION)
crop_thresh: 0.5
visual: True
warmup_frame: 50

MOT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
  enable: True

ID_BASED_CLSACTION:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_tiny_calling_halfbody.zip
  batch_size: 8
  threshold: 0.8
  display_frames: 80
  skip_frame_num: 2
  enable: True
# Example config (skeleton-based action, e.g. fall detection: MOT + KPT + SKELETON_ACTION)
crop_thresh: 0.5
kpt_thresh: 0.2
visual: True
warmup_frame: 50

MOT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
  enable: True

KPT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/dark_hrnet_w32_256x192.zip
  batch_size: 8

SKELETON_ACTION:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/STGCN.zip
  batch_size: 1
  max_frames: 50
  display_frames: 80
  coord_size: [384, 512]
  enable: True
# Example config (video-classification action, e.g. fight recognition: VIDEO_ACTION)
visual: True
warmup_frame: 50

VIDEO_ACTION:
  model_dir: https://videotag.bj.bcebos.com/PaddleVideo-release2.3/ppTSM_fight.zip
  batch_size: 1
  frame_len: 8
  sample_freq: 7
  short_size: 340
  target_size: 320
  enable: True
# Example config (pedestrian attribute recognition: MOT + ATTR)
crop_thresh: 0.5
attr_thresh: 0.5
visual: True
warmup_frame: 50

MOT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
  enable: True

ATTR:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/PPLCNet_x1_0_person_attribute_945_infer.zip
  batch_size: 8
  enable: True
# Example config (pedestrian tracking only: MOT)
crop_thresh: 0.5
visual: True
warmup_frame: 50

MOT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
  enable: True
# Example config (cross-camera ReID: MOT + REID)
crop_thresh: 0.5
visual: True
warmup_frame: 50

MOT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
  enable: True

REID:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/reid_model.zip
  batch_size: 16
  enable: True
# Example config (smoking detection: MOT + ID_BASED_DETACTION)
crop_thresh: 0.5
visual: True
warmup_frame: 50

MOT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
  enable: True

ID_BASED_DETACTION:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ppyoloe_crn_s_80e_smoking_visdrone.zip
  batch_size: 8
  threshold: 0.6
  display_frames: 80
  skip_frame_num: 2
  enable: True
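A quick way to check which tasks one of these example configs switches on (a sketch; the path points at the ReID example referenced in the docs below):

```
import yaml

# List the task sections that are enabled in an example config.
with open("deploy/pipeline/config/examples/infer_cfg_reid.yml") as f:
    cfg = yaml.safe_load(f)

enabled = [name for name, section in cfg.items()
           if isinstance(section, dict) and section.get("enable")]
print(enabled)  # expected: ['MOT', 'REID']
```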
@@ -12,7 +12,6 @@ MOT:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
-  basemode: "idbased"
  enable: False
KPT:
@@ -22,7 +21,6 @@ KPT:
ATTR:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/PPLCNet_x1_0_person_attribute_945_infer.zip
  batch_size: 8
-  basemode: "idbased"
  enable: False
VIDEO_ACTION:
@@ -32,7 +30,6 @@ VIDEO_ACTION:
  sample_freq: 7
  short_size: 340
  target_size: 320
-  basemode: "videobased"
  enable: False
SKELETON_ACTION:
@@ -41,13 +38,11 @@ SKELETON_ACTION:
  max_frames: 50
  display_frames: 80
  coord_size: [384, 512]
-  basemode: "skeletonbased"
  enable: False
ID_BASED_DETACTION:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ppyoloe_crn_s_80e_smoking_visdrone.zip
  batch_size: 8
-  basemode: "idbased"
  threshold: 0.6
  display_frames: 80
  skip_frame_num: 2
@@ -56,7 +51,6 @@ ID_BASED_DETACTION:
ID_BASED_CLSACTION:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_tiny_calling_halfbody.zip
  batch_size: 8
-  basemode: "idbased"
  threshold: 0.8
  display_frames: 80
  skip_frame_num: 2
@@ -65,5 +59,4 @@ ID_BASED_CLSACTION:
REID:
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/reid_model.zip
  batch_size: 16
-  basemode: "idbased"
  enable: False
@@ -10,7 +10,6 @@ MOT:
  model_dir: output_inference/mot_ppyoloe_l_36e_ppvehicle/
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
-  basemode: "idbased"
  enable: False
VEHICLE_PLATE:
@@ -21,13 +20,11 @@ VEHICLE_PLATE:
  rec_image_shape: [3, 48, 320]
  rec_batch_num: 6
  word_dict_path: deploy/pipeline/ppvehicle/rec_word_dict.txt
-  basemode: "idbased"
  enable: False
VEHICLE_ATTR:
  model_dir: output_inference/vehicle_attribute_infer/
  batch_size: 8
-  basemode: "idbased"
  color_threshold: 0.5
  type_threshold: 0.5
  enable: False
@@ -35,5 +32,4 @@ VEHICLE_ATTR:
REID:
  model_dir: output_inference/vehicle_reid_model/
  batch_size: 16
-  basemode: "idbased"
  enable: False
@@ -69,7 +69,7 @@ PP-Human provides pretrained models for object detection, attribute recognition, action recognition and ReID
## Configuration File Description

-The PP-Human configuration lives in ```deploy/pipeline/config/infer_cfg_pphuman.yml```, which stores the model paths; enabling different features requires setting different task types
+The PP-Human configuration lives in ```deploy/pipeline/config/infer_cfg_pphuman.yml```, which stores the model paths and covers every feature PP-Human currently supports. To see the configuration for a single feature, refer to the corresponding file under ```deploy/pipeline/config/examples/```. Values in the configuration file can also be overridden from the command line with ```-o```; for example, to change the attribute model directory, pass ```-o ATTR.model_dir="DIR_PATH"```.

The mapping between features and task types is as follows:
@@ -90,20 +90,17 @@ MOT:
  model_dir: output_inference/mot_ppyoloe_l_36e_pipeline/
  tracker_config: deploy/pipeline/config/tracker_config.yml
  batch_size: 1
-  basemode: "idbased"
  enable: True
ATTR:
  model_dir: output_inference/strongbaseline_r50_30e_pa100k/
  batch_size: 8
-  basemode: "idbased"
  enable: True
```
**Note:**

-- To run a particular task, set the corresponding enable option in the configuration file to True; its basemode type then activates the base-capability models it depends on in the code, such as the tracking model.
-- To change only a model path, add `--model_dir det=ppyoloe/` to the command line, or edit the corresponding model path in the configuration file; see the parameter description below for details.
+- To run a particular task, set the corresponding enable option in the configuration file to True.
## Inference and Deployment
@@ -117,7 +114,7 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
# Pedestrian tracking: specify the config file path, model path and test video; set enable to ```True``` in the MOT section of ```deploy/pipeline/config/infer_cfg_pphuman.yml```
-# Model paths specified on the command line take priority over the config file
-python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --video_file=test_video.mp4 --device=gpu --model_dir det=ppyoloe/ [--run_mode trt_fp16]
+python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --video_file=test_video.mp4 --device=gpu [--run_mode trt_fp16]

# Pedestrian attribute recognition: specify the config file path and test video; set enable to ```True``` in the ATTR section of ```deploy/pipeline/config/infer_cfg_pphuman.yml```
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --video_file=test_video.mp4 --device=gpu [--run_mode trt_fp16]
@@ -127,6 +124,10 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
# Cross-camera pedestrian tracking: specify the config file path and the folder of test videos; set enable to ```True``` in the REID section of ```deploy/pipeline/config/infer_cfg_pphuman.yml```
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --video_dir=mtmct_dir/ --device=gpu [--run_mode trt_fp16]

+# Cross-camera pedestrian tracking: specify the config file path and the folder of test videos, use the ```deploy/pipeline/config/examples/infer_cfg_reid.yml``` config directly, and override the tracking model path with the ```-o``` option
+python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_reid.yml --video_dir=mtmct_dir/ -o MOT.model_dir="mot_model_dir" --device=gpu [--run_mode trt_fp16]
```
To run on an RTSP stream, replace the video path after video_file with the RTSP stream address, for example:
@@ -140,7 +141,7 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
| Parameter | Required | Description |
|-------|-------|----------|
| --config | Yes | Path to the config file |
-| --model_dir | Option | Model paths of the PP-Human tasks; takes priority over the config file, e.g. `--model_dir det=better_det/ attr=better_attr/` |
+| -o | Option | Override the corresponding settings in the config file |
| --image_file | Option | Image to predict |
| --image_dir | Option | Directory of images to predict |
| --video_file | Option | Video to predict, or an RTSP stream address |
......
@@ -62,7 +62,7 @@ The PP-Vehicle configuration lives in ```deploy/pipeline/config/infer_cfg_ppvehicle.yml```
**Note:**

-- To run a particular task, set the corresponding enable option in the configuration file to True; its basemode type then activates the base-capability models it depends on in the code, such as the tracking model
+- To run a particular task, set the corresponding enable option in the configuration file to True.
- To change only a model path, add `--model_dir det=ppyoloe/` to the command line, or edit the corresponding model path in the configuration file; see the parameter description below for details.
......
@@ -46,7 +46,6 @@ SKELETON_ACTION: # Config for the skeleton-based action recognition model
  max_frames: 50 # Number of frames per action segment. Once the time-ordered keypoint sequence of a pedestrian ID reaches this length, the action recognition model classifies the sequence. Matching the training setting gives the best results.
  display_frames: 80 # Number of display frames: how long the state stays shown on the person ID when a fall is predicted.
  coord_size: [384, 512] # Size to which coordinates are uniformly rescaled. Matching the training setting gives the best results.
-  basemode: "skeletonbased" # Pipeline branch the model belongs to, i.e. whether skeleton input is required
  enable: False # Whether to enable this feature
```
@@ -112,7 +111,6 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
ID_BASED_CLSACTION: # Config for the classification-based action recognition model
  model_dir: output_inference/PPHGNet_tiny_calling_halfbody # Path of the model
  batch_size: 8 # Inference batch size
-  basemode: "idbased" # Pipeline branch the model belongs to, i.e. whether it relies on IDs from tracking
  threshold: 0.45 # Threshold for recognizing the corresponding action
  display_frames: 80 # Number of display frames: how long the state stays shown on the person ID once the action is recognized.
  enable: False # Whether to enable this feature
@@ -168,7 +166,6 @@ python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pph
ID_BASED_DETACTION: # Config for the detection-based action recognition model
  model_dir: output_inference/ppyoloe_crn_s_80e_smoking_visdrone # Path of the model
  batch_size: 8 # Inference batch size
-  basemode: "idbased" # Pipeline branch the model belongs to, i.e. whether it relies on IDs from tracking
  threshold: 0.4 # Threshold for recognizing the corresponding action
  display_frames: 80 # Number of display frames: how long the state stays shown on the person ID once the action is recognized.
  enable: False # Whether to enable this feature
@@ -242,7 +239,6 @@ VIDEO_ACTION: # Config for the video-classification-based action recognition model
  sample_freq: 7 # Sampling frequency: one frame is sampled every this many frames
  short_size: 340 # Length of the shorter side after video frame rescaling
  target_size: 320 # Target size of the video frames
-  basemode: "videobased" # Pipeline branch the model belongs to, i.e. whether raw video is used directly as input
  enable: False # Whether to enable this feature
```
......
@@ -53,7 +53,6 @@ SKELETON_ACTION: # Config for skeleton-based action recognition model
  max_frames: 50 # The number of frames of action segments. When the time-ordered skeleton keypoints of a pedestrian ID reach this number of frames, the action type is judged by the action recognition model. Matching the training setting gives the best results.
  display_frames: 80 # The number of display frames. When the inferred action type is falling down, the state is displayed on the person ID for this length of time.
  coord_size: [384, 512] # The unified size of the coordinates; best when it matches the training setting.
-  basemode: "skeletonbased" # The branch the model is based on, i.e. whether the skeleton model is needed.
  enable: False # Whether to enable this function
```
@@ -115,7 +114,6 @@ Parameters related to action recognition in the [config file](../../config/infer
ID_BASED_CLSACTION: # Config for the classification-based action recognition model
  model_dir: output_inference/PPHGNet_tiny_calling_halfbody # Path of the model
  batch_size: 8 # The size of the inference batch
-  basemode: "idbased" # The branch the model is based on, i.e. whether the IDs obtained by tracking are needed.
  threshold: 0.45 # Threshold for the corresponding behavior
  display_frames: 80 # The number of display frames. When the corresponding action is detected, the state is displayed on the person ID for this length of time.
  enable: False # Whether to enable this function
@@ -162,7 +160,6 @@ Parameters related to action recognition in the [config file](../../config/infer
ID_BASED_DETACTION: # Config for the detection-based action recognition model
  model_dir: output_inference/ppyoloe_crn_s_80e_smoking_visdrone # Path of the model
  batch_size: 8 # The size of the inference batch
-  basemode: "idbased" # The branch the model is based on, i.e. whether the IDs obtained by tracking are needed.
  threshold: 0.4 # Threshold for the corresponding behavior.
  display_frames: 80 # The number of display frames. When the corresponding action is detected, the state is displayed on the person ID for this length of time.
  enable: False # Whether to enable this function
@@ -220,7 +217,6 @@ VIDEO_ACTION: # Config for the video-classification-based action recognition model
  sample_freq: 7 # Sampling frequency: one frame is sampled every this many frames.
  short_size: 340 # The length of the shorter side after video frame rescaling.
  target_size: 320 # Target size of the input video frames
-  basemode: "videobased" # The branch the model is based on, i.e. whether video is used directly as model input.
  enable: False # Whether to enable this function
```
......
@@ -27,7 +27,6 @@
ATTR: # Module name
  model_dir: output_inference/PPLCNet_x1_0_person_attribute_945_infer/ # Model path
  batch_size: 8 # Maximum inference batch size
-  basemode: "idbased" # Pipeline type; 'idbased' means the model builds on the tracking model
  enable: False # Whether to enable this feature
```
......
@@ -24,7 +24,6 @@ The meaning of configs of `infer_cfg_pphuman.yml`:
ATTR: # module name
  model_dir: output_inference/PPLCNet_x1_0_person_attribute_945_infer/ # model path
  batch_size: 8 # maximum batch size for inference
-  basemode: "idbased" # the routing type of the pipeline; 'idbased' means this model is based on tracking.
  enable: False # whether to enable this model
```
......
@@ -15,7 +15,6 @@
import time
import os
import ast
import argparse
import glob
import yaml
import copy
@@ -24,120 +23,6 @@ import numpy as np
from python.keypoint_preprocess import EvalAffine, TopDownEvalAffine, expand_crop
def argsparser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--config",
type=str,
default=None,
help=("Path of configure"),
required=True)
parser.add_argument(
"--image_file", type=str, default=None, help="Path of image file.")
parser.add_argument(
"--image_dir",
type=str,
default=None,
help="Dir of image file, `image_file` has a higher priority.")
parser.add_argument(
"--video_file",
type=str,
default=None,
help="Path of video file, `video_file` or `camera_id` has a highest priority."
)
parser.add_argument(
"--video_dir",
type=str,
default=None,
help="Dir of video file, `video_file` has a higher priority.")
parser.add_argument(
"--model_dir", nargs='*', help="set model dir in pipeline")
parser.add_argument(
"--camera_id",
type=int,
default=-1,
help="device id of camera to predict.")
parser.add_argument(
"--output_dir",
type=str,
default="output",
help="Directory of output visualization files.")
parser.add_argument(
"--run_mode",
type=str,
default='paddle',
help="mode of running(paddle/trt_fp32/trt_fp16/trt_int8)")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU."
)
parser.add_argument(
"--enable_mkldnn",
type=ast.literal_eval,
default=False,
help="Whether use mkldnn with CPU.")
parser.add_argument(
"--cpu_threads", type=int, default=1, help="Num of threads with CPU.")
parser.add_argument(
"--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.")
parser.add_argument(
"--trt_max_shape",
type=int,
default=1280,
help="max_shape for TensorRT.")
parser.add_argument(
"--trt_opt_shape",
type=int,
default=640,
help="opt_shape for TensorRT.")
parser.add_argument(
"--trt_calib_mode",
type=bool,
default=False,
help="If the model is produced by TRT offline quantitative "
"calibration, trt_calib_mode need to set True.")
parser.add_argument(
"--do_entrance_counting",
action='store_true',
help="Whether counting the numbers of identifiers entering "
"or getting out from the entrance. Note that only support single-class MOT."
)
parser.add_argument(
"--do_break_in_counting",
action='store_true',
help="Whether counting the numbers of identifiers break in "
"the area. Note that only support single-class MOT and "
"the video should be taken by a static camera.")
parser.add_argument(
"--region_type",
type=str,
default='horizontal',
help="Area type for entrance counting or break in counting, 'horizontal' and "
"'vertical' used when do entrance counting. 'custom' used when do break in counting. "
"Note that only support single-class MOT, and the video should be taken by a static camera."
)
parser.add_argument(
'--region_polygon',
nargs='+',
type=int,
default=[],
help="Clockwise point coords (x0,y0,x1,y1...) of polygon of area when "
"do_break_in_counting. Note that only support single-class MOT and "
"the video should be taken by a static camera.")
parser.add_argument(
"--secs_interval",
type=int,
default=2,
help="The seconds interval to count after tracking")
parser.add_argument(
"--draw_center_traj",
action='store_true',
help="Whether drawing the trajectory of center")
return parser
class Times(object):
def __init__(self):
self.time = 0.
@@ -233,49 +118,6 @@ class PipeTimer(Times):
return dic
def merge_model_dir(args, model_dir):
# set --model_dir DET=ppyoloe/ to overwrite the model_dir in config file
task_set = ['DET', 'ATTR', 'MOT', 'KPT', 'SKELETON_ACTION', 'REID']
if not model_dir:
return args
for md in model_dir:
md = md.strip()
k, v = md.split('=', 1)
k_upper = k.upper()
assert k_upper in task_set, 'Illegal type of task, expect task are: {}, but received {}'.format(
task_set, k)
args[k_upper].update({'model_dir': v})
return args
def merge_cfg(args):
with open(args.config) as f:
pred_config = yaml.safe_load(f)
def merge(cfg, arg):
merge_cfg = copy.deepcopy(cfg)
for k, v in cfg.items():
if k in arg:
merge_cfg[k] = arg[k]
else:
if isinstance(v, dict):
merge_cfg[k] = merge(v, arg)
return merge_cfg
args_dict = vars(args)
model_dir = args_dict.pop('model_dir')
pred_config = merge_model_dir(pred_config, model_dir)
pred_config = merge(pred_config, args_dict)
return pred_config
def print_arguments(cfg):
print('----------- Running Arguments -----------')
buffer = yaml.dump(cfg)
print(buffer)
print('------------------------------------------')
def get_test_images(infer_dir, infer_img):
"""
Get image path list in TEST mode
......
@@ -28,7 +28,8 @@ from datacollector import DataCollector, Result
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)

-from pipe_utils import argsparser, print_arguments, merge_cfg, PipeTimer
+from cfg_utils import argsparser, print_arguments, merge_cfg
+from pipe_utils import PipeTimer
from pipe_utils import get_test_images, crop_image_with_det, crop_image_with_mot, parse_mot_res, parse_mot_keypoint

from python.infer import Detector, DetectorPicoDet
@@ -331,6 +332,18 @@ class PipePredictor(object):
"skeletonbased": False
}

+self.basemode = {
+    "MOT": "idbased",
+    "ATTR": "idbased",
+    "VIDEO_ACTION": "videobased",
+    "SKELETON_ACTION": "skeletonbased",
+    "ID_BASED_DETACTION": "idbased",
+    "ID_BASED_CLSACTION": "idbased",
+    "REID": "idbased",
+    "VEHICLE_PLATE": "idbased",
+    "VEHICLE_ATTR": "idbased",
+}
self.is_video = is_video
self.multi_camera = multi_camera
self.cfg = cfg
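This new table replaces the per-task `basemode` key removed from the YAML configs: the pipeline now looks a task's branch up by name and flips the matching `modebase` flag when that task is enabled. A minimal sketch of the mechanism (task names taken from the dict above; the enabled tasks are chosen arbitrarily for illustration):

```
# Minimal sketch: enabling a task switches on its pipeline branch.
modebase = {"idbased": False, "videobased": False, "skeletonbased": False}
basemode = {"MOT": "idbased", "ATTR": "idbased", "VIDEO_ACTION": "videobased"}

for task in ("MOT", "ATTR"):  # tasks with enable: True in the config
    modebase[basemode[task]] = True

print(modebase)  # {'idbased': True, 'videobased': False, 'skeletonbased': False}
```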
@@ -363,7 +376,7 @@
attr_cfg = self.cfg['ATTR']
model_dir = model_dir_dict['ATTR']
batch_size = attr_cfg['batch_size']
-basemode = attr_cfg['basemode']
+basemode = self.basemode['ATTR']
self.modebase[basemode] = True
self.attr_predictor = AttrDetector(
model_dir, device, run_mode, batch_size, trt_min_shape,
@@ -376,7 +389,7 @@
batch_size = vehicleattr_cfg['batch_size']
color_threshold = vehicleattr_cfg['color_threshold']
type_threshold = vehicleattr_cfg['type_threshold']
-basemode = vehicleattr_cfg['basemode']
+basemode = self.basemode['VEHICLE_ATTR']
self.modebase[basemode] = True
self.vehicle_attr_predictor = VehicleAttr(
model_dir, device, run_mode, batch_size, trt_min_shape,
@@ -388,7 +401,7 @@
attr_cfg = self.cfg['ATTR']
model_dir = model_dir_dict['ATTR']
batch_size = attr_cfg['batch_size']
-basemode = attr_cfg['basemode']
+basemode = self.basemode['ATTR']
self.modebase[basemode] = True
self.attr_predictor = AttrDetector(
model_dir, device, run_mode, batch_size, trt_min_shape,
@@ -398,7 +411,7 @@
idbased_detaction_cfg = self.cfg['ID_BASED_DETACTION']
model_dir = model_dir_dict['ID_BASED_DETACTION']
batch_size = idbased_detaction_cfg['batch_size']
-basemode = idbased_detaction_cfg['basemode']
+basemode = self.basemode['ID_BASED_DETACTION']
threshold = idbased_detaction_cfg['threshold']
display_frames = idbased_detaction_cfg['display_frames']
skip_frame_num = idbased_detaction_cfg['skip_frame_num']
@@ -424,7 +437,7 @@
idbased_clsaction_cfg = self.cfg['ID_BASED_CLSACTION']
model_dir = model_dir_dict['ID_BASED_CLSACTION']
batch_size = idbased_clsaction_cfg['batch_size']
-basemode = idbased_clsaction_cfg['basemode']
+basemode = self.basemode['ID_BASED_CLSACTION']
threshold = idbased_clsaction_cfg['threshold']
self.modebase[basemode] = True
display_frames = idbased_clsaction_cfg['display_frames']
@@ -453,7 +466,7 @@
skeleton_action_frames = skeleton_action_cfg['max_frames']
display_frames = skeleton_action_cfg['display_frames']
self.coord_size = skeleton_action_cfg['coord_size']
-basemode = skeleton_action_cfg['basemode']
+basemode = self.basemode['SKELETON_ACTION']
self.modebase[basemode] = True
self.skeleton_action_predictor = SkeletonActionRecognizer(
@@ -493,7 +506,7 @@
vehicleplate_cfg = self.cfg['VEHICLE_PLATE']
self.vehicleplate_detector = PlateRecognizer(args,
vehicleplate_cfg)
-basemode = vehicleplate_cfg['basemode']
+basemode = self.basemode['VEHICLE_PLATE']
self.modebase[basemode] = True
if self.with_vehicle_attr:
@@ -502,7 +515,7 @@
batch_size = vehicleattr_cfg['batch_size']
color_threshold = vehicleattr_cfg['color_threshold']
type_threshold = vehicleattr_cfg['type_threshold']
-basemode = vehicleattr_cfg['basemode']
+basemode = self.basemode['VEHICLE_ATTR']
self.modebase[basemode] = True
self.vehicle_attr_predictor = VehicleAttr(
model_dir, device, run_mode, batch_size, trt_min_shape,
@@ -513,7 +526,7 @@
reid_cfg = self.cfg['REID']
model_dir = model_dir_dict['REID']
batch_size = reid_cfg['batch_size']
-basemode = reid_cfg['basemode']
+basemode = self.basemode['REID']
self.modebase[basemode] = True
self.reid_predictor = ReID(
model_dir, device, run_mode, batch_size, trt_min_shape,
@@ -526,7 +539,7 @@
model_dir = model_dir_dict['MOT']
tracker_config = mot_cfg['tracker_config']
batch_size = mot_cfg['batch_size']
-basemode = mot_cfg['basemode']
+basemode = self.basemode['MOT']
self.modebase[basemode] = True
self.mot_predictor = SDE_Detector(
model_dir,
@@ -550,7 +563,7 @@
if self.with_video_action:
video_action_cfg = self.cfg['VIDEO_ACTION']
-basemode = video_action_cfg['basemode']
+basemode = self.basemode['VIDEO_ACTION']
self.modebase[basemode] = True
video_action_model_dir = model_dir_dict['VIDEO_ACTION']
@@ -1087,7 +1100,7 @@ class PipePredictor(object):
def main():
-cfg = merge_cfg(FLAGS)
+cfg = merge_cfg(FLAGS)  # use command params to update config
print_arguments(cfg)
pipeline = Pipeline(FLAGS, cfg)
@@ -1096,6 +1109,8 @@ def main():
if __name__ == '__main__':
paddle.enable_static()

+# parse params from command
parser = argsparser()
FLAGS = parser.parse_args()
FLAGS.device = FLAGS.device.upper()
......
@@ -31,7 +31,7 @@ from python.infer import get_test_images
from python.preprocess import preprocess, NormalizeImage, Permute, Resize_Mult32
from pipeline.ppvehicle.vehicle_plateutils import create_predictor, get_infer_gpuid, get_rotate_crop_image, draw_boxes
from pipeline.ppvehicle.vehicleplate_postprocess import build_post_process
-from pipeline.pipe_utils import merge_cfg, print_arguments, argsparser
+from pipeline.cfg_utils import merge_cfg, print_arguments, argsparser

class PlateDetector(object):
......