Commit 5f9e8f01 authored by zhiboniu, committed by zhiboniu

move vehicle plate to new path

Parent ac252a34
@@ -32,6 +32,18 @@ def argsparser():
         default=None,
         help=("Path of configure"),
         required=True)
+    parser.add_argument("--det_algorithm", type=str, default='DB')
+    parser.add_argument("--det_model_dir", type=str)
+    parser.add_argument("--det_limit_side_len", type=float, default=960)
+    parser.add_argument("--det_limit_type", type=str, default='max')
+    parser.add_argument("--rec_algorithm", type=str, default='SVTR_LCNet')
+    parser.add_argument("--rec_model_dir", type=str)
+    parser.add_argument("--rec_image_shape", type=str, default="3, 48, 320")
+    parser.add_argument("--rec_batch_num", type=int, default=6)
+    parser.add_argument(
+        "--word_dict_path",
+        type=str,
+        default="deploy/pphuman/rec_word_dict.txt")
     parser.add_argument(
         "--image_file", type=str, default=None, help="Path of image file.")
     parser.add_argument(
......
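A note on the hunk above: it moves the PP-OCR-style detector/recognizer flags into the plate pipeline's own argsparser(). As a minimal sketch of how these flags are consumed downstream (the parser is rebuilt locally here, and the model directories passed in are hypothetical, purely for illustration):

import argparse

# Rebuild just the flags added in the hunk above.
parser = argparse.ArgumentParser()
parser.add_argument("--det_algorithm", type=str, default='DB')
parser.add_argument("--det_model_dir", type=str)
parser.add_argument("--det_limit_side_len", type=float, default=960)
parser.add_argument("--det_limit_type", type=str, default='max')
parser.add_argument("--rec_algorithm", type=str, default='SVTR_LCNet')
parser.add_argument("--rec_model_dir", type=str)
parser.add_argument("--rec_image_shape", type=str, default="3, 48, 320")
parser.add_argument("--rec_batch_num", type=int, default=6)

# Hypothetical model directories, for illustration only.
args = parser.parse_args([
    "--det_model_dir", "output_inference/plate_det_infer",
    "--rec_model_dir", "output_inference/plate_rec_infer",
])

# TextRecognizer later splits rec_image_shape into [C, H, W] integers:
c, h, w = (int(v) for v in args.rec_image_shape.split(","))
print(args.det_algorithm, args.rec_algorithm, (c, h, w))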
@@ -25,13 +25,15 @@ import paddle
 import sys
 # add deploy path of PadleDetection to sys.path
-parent_path = os.path.abspath(os.path.join(__file__, *(['..'])))
+# add deploy path of PadleDetection to sys.path
+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
 sys.path.insert(0, parent_path)
-from infer import get_test_images, print_arguments
-from vechile_plateutils import create_predictor, get_infer_gpuid, argsparser, get_rotate_crop_image, draw_boxes
+from python.infer import get_test_images, print_arguments
+from vechile_plateutils import create_predictor, get_infer_gpuid, get_rotate_crop_image, draw_boxes
 from vecplatepostprocess import build_post_process
-from preprocess import preprocess, NormalizeImage, Permute, Resize_Mult32
+from python.preprocess import preprocess, NormalizeImage, Permute, Resize_Mult32
+from vechile_plateutils import argsparser

 class PlateDetector(object):
@@ -63,25 +65,6 @@ class PlateDetector(object):
         self.predictor, self.input_tensor, self.output_tensors, self.config = create_predictor(
             args, 'det')
-        if args.run_benchmark:
-            import auto_log
-            pid = os.getpid()
-            gpu_id = get_infer_gpuid()
-            self.autolog = auto_log.AutoLogger(
-                model_name="det",
-                model_precision="fp32",
-                batch_size=1,
-                data_shape="dynamic",
-                save_path=None,
-                inference_config=self.config,
-                pids=pid,
-                process_name=None,
-                gpu_ids=gpu_id if args.device == "GPU" else None,
-                time_keys=[
-                    'preprocess_time', 'inference_time', 'postprocess_time'
-                ],
-                warmup=2, )

     def preprocess(self, image_list):
         preprocess_ops = []
         for op_type, new_op_info in self.pre_process_list.items():
@@ -139,24 +122,16 @@ class PlateDetector(object):
     def predict_image(self, img_list):
         st = time.time()
-        if self.args.run_benchmark:
-            self.autolog.times.start()
         img, shape_list = self.preprocess(img_list)
         if img is None:
             return None, 0
-        if self.args.run_benchmark:
-            self.autolog.times.stamp()
         self.input_tensor.copy_from_cpu(img)
         self.predictor.run()
         outputs = []
         for output_tensor in self.output_tensors:
             output = output_tensor.copy_to_cpu()
             outputs.append(output)
-        if self.args.run_benchmark:
-            self.autolog.times.stamp()
         preds = {}
         preds['maps'] = outputs[0]
@@ -171,14 +146,12 @@ class PlateDetector(object):
             dt_boxes = self.filter_tag_det_res(dt_boxes, org_shape)
             dt_batch_boxes.append(dt_boxes)
-        if self.args.run_benchmark:
-            self.autolog.times.end(stamp=True)
         et = time.time()
         return dt_batch_boxes, et - st

 class TextRecognizer(object):
-    def __init__(self, FLAGS, use_gpu=True, benchmark=False):
+    def __init__(self, FLAGS, use_gpu=True):
         self.rec_image_shape = [
             int(v) for v in FLAGS.rec_image_shape.split(",")
         ]
@@ -219,26 +192,7 @@ class TextRecognizer(object):
         self.postprocess_op = build_post_process(postprocess_params)
         self.predictor, self.input_tensor, self.output_tensors, self.config = \
             create_predictor(FLAGS, 'rec')
-        self.benchmark = benchmark
         self.use_onnx = False
-        if benchmark:
-            import auto_log
-            pid = os.getpid()
-            gpu_id = get_infer_gpuid()
-            self.autolog = auto_log.AutoLogger(
-                model_name="rec",
-                model_precision='fp32',
-                batch_size=batch_size,
-                data_shape="dynamic",
-                save_path=None,  #save_log_path,
-                inference_config=self.config,
-                pids=pid,
-                process_name=None,
-                gpu_ids=gpu_id if use_gpu else None,
-                time_keys=[
-                    'preprocess_time', 'inference_time', 'postprocess_time'
-                ],
-                warmup=0)

     def resize_norm_img(self, img, max_wh_ratio):
         imgC, imgH, imgW = self.rec_image_shape
@@ -407,8 +361,6 @@ class TextRecognizer(object):
         rec_res = [['', 0.0]] * img_num
         batch_num = self.rec_batch_num
         st = time.time()
-        if self.benchmark:
-            self.autolog.times.start()
         for beg_img_no in range(0, img_num, batch_num):
             end_img_no = min(img_num, beg_img_no + batch_num)
             norm_img_batch = []
@@ -453,8 +405,6 @@ class TextRecognizer(object):
                 norm_img_batch.append(norm_img)
             norm_img_batch = np.concatenate(norm_img_batch)
             norm_img_batch = norm_img_batch.copy()
-            if self.benchmark:
-                self.autolog.times.stamp()
             if self.rec_algorithm == "SRN":
                 encoder_word_pos_list = np.concatenate(encoder_word_pos_list)
@@ -488,8 +438,6 @@ class TextRecognizer(object):
                 for output_tensor in self.output_tensors:
                     output = output_tensor.copy_to_cpu()
                     outputs.append(output)
-                if self.benchmark:
-                    self.autolog.times.stamp()
                 preds = {"predict": outputs[2]}
             elif self.rec_algorithm == "SAR":
                 valid_ratios = np.concatenate(valid_ratios)
@@ -514,8 +462,6 @@ class TextRecognizer(object):
                 for output_tensor in self.output_tensors:
                     output = output_tensor.copy_to_cpu()
                     outputs.append(output)
-                if self.benchmark:
-                    self.autolog.times.stamp()
                 preds = outputs[0]
             else:
                 if self.use_onnx:
@@ -531,8 +477,6 @@ class TextRecognizer(object):
                 for output_tensor in self.output_tensors:
                     output = output_tensor.copy_to_cpu()
                     outputs.append(output)
-                if self.benchmark:
-                    self.autolog.times.stamp()
                 if len(outputs) != 1:
                     preds = outputs
                 else:
@@ -540,8 +484,6 @@ class TextRecognizer(object):
             rec_result = self.postprocess_op(preds)
             for rno in range(len(rec_result)):
                 rec_res[indices[beg_img_no + rno]] = rec_result[rno]
-        if self.benchmark:
-            self.autolog.times.end(stamp=True)
         return rec_res, time.time() - st
@@ -549,8 +491,7 @@ class PlateRecognizer(object):
     def __init__(self):
         use_gpu = FLAGS.device.lower() == "gpu"
         self.platedetector = PlateDetector(FLAGS)
-        self.textrecognizer = TextRecognizer(
-            FLAGS, use_gpu=use_gpu, benchmark=FLAGS.run_benchmark)
+        self.textrecognizer = TextRecognizer(FLAGS, use_gpu=use_gpu)

     def get_platelicense(self, image_list):
         plate_text_list = []
@@ -582,35 +523,11 @@ class PlateRecognizer(object):
 def main():
     detector = PlateRecognizer()
     # predict from image
-    if FLAGS.image_dir is None and FLAGS.image_file is not None:
-        assert FLAGS.batch_size == 1, "batch_size should be 1, when image_file is not None"
     img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
     for img in img_list:
         image = cv2.imread(img)
         # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
         results = detector.get_platelicense([image])
-    if FLAGS.run_benchmark:
-        mems = {
-            'cpu_rss_mb': detector.cpu_mem / len(img_list),
-            'gpu_rss_mb': detector.gpu_mem / len(img_list),
-            'gpu_util': detector.gpu_util * 100 / len(img_list)
-        }
-        perf_info = detector.self.autolog.times.report(average=True)
-        model_dir = FLAGS.model_dir
-        mode = FLAGS.run_mode
-        model_info = {
-            'model_name': model_dir.strip('/').split('/')[-1],
-            'precision': mode.split('_')[-1]
-        }
-        data_info = {
-            'batch_size': FLAGS.batch_size,
-            'shape': "dynamic_shape",
-            'data_num': perf_info['img_num']
-        }
-        det_log = PaddleInferBenchmark(detector.config, model_info, data_info,
-                                       perf_info, mems)
-        det_log('Attr')

 if __name__ == '__main__':
@@ -621,6 +538,6 @@ if __name__ == '__main__':
     FLAGS.device = FLAGS.device.upper()
     assert FLAGS.device in ['CPU', 'GPU', 'XPU'
                             ], "device should be CPU, GPU or XPU"
-    assert not FLAGS.use_gpu, "use_gpu has been deprecated, please use --device"
+    # assert not FLAGS.use_gpu, "use_gpu has been deprecated, please use --device"
     main()
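Before the utility-file hunks below, it may help to see the end-to-end flow the refactored classes implement: PlateDetector proposes plate quadrangles (DB detection), each quadrangle is perspective-rectified with get_rotate_crop_image, and TextRecognizer reads the crops in batches of rec_batch_num. The sketch below is an illustration, not the repository's code; the recognizer's public entry point is not shown in these hunks, so its name (predict_text) is an assumption.

# Illustration of the two-stage flow implied by the classes above.
# `get_rotate_crop_image` is the repo helper imported in the diff;
# `predict_text` is an assumed method name for the recognizer's batch call.
from vechile_plateutils import get_rotate_crop_image

def read_plates(image, plate_detector, text_recognizer):
    # Stage 1: detect plate quadrangles in a batch of one image.
    batch_boxes, det_time = plate_detector.predict_image([image])
    crops = []
    for boxes in batch_boxes:
        for box in boxes:
            # Rectify each quadrangle into an upright crop for recognition.
            crops.append(get_rotate_crop_image(image, box))
    if not crops:
        return [], det_time
    # Stage 2: recognize all crops, rec_batch_num at a time.
    rec_res, rec_time = text_recognizer.predict_text(crops)
    return rec_res, det_time + rec_time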
@@ -26,12 +26,10 @@ import time
 import ast

-def str2bool(v):
-    return v.lower() in ("true", "t", "1")

 def argsparser():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument(
-        "--config", type=str, default=None, help=("Path of configure"))
     parser.add_argument("--det_algorithm", type=str, default='DB')
     parser.add_argument("--det_model_dir", type=str)
     parser.add_argument("--det_limit_side_len", type=float, default=960)
@@ -51,21 +49,24 @@ def argsparser():
         type=str,
         default=None,
         help="Dir of image file, `image_file` has a higher priority.")
-    parser.add_argument(
-        "--batch_size", type=int, default=1, help="batch_size for inference.")
     parser.add_argument(
         "--video_file",
         type=str,
         default=None,
         help="Path of video file, `video_file` or `camera_id` has a highest priority."
     )
+    parser.add_argument(
+        "--video_dir",
+        type=str,
+        default=None,
+        help="Dir of video file, `video_file` has a higher priority.")
+    parser.add_argument(
+        "--model_dir", nargs='*', help="set model dir in pipeline")
     parser.add_argument(
         "--camera_id",
         type=int,
         default=-1,
         help="device id of camera to predict.")
-    parser.add_argument(
-        "--threshold", type=float, default=0.5, help="Threshold of score.")
     parser.add_argument(
         "--output_dir",
         type=str,
@@ -82,26 +83,11 @@ def argsparser():
         default='cpu',
         help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU."
     )
-    parser.add_argument(
-        "--use_gpu",
-        type=ast.literal_eval,
-        default=False,
-        help="Deprecated, please use `--device`.")
-    parser.add_argument(
-        "--run_benchmark",
-        type=ast.literal_eval,
-        default=False,
-        help="Whether to predict a image_file repeatedly for benchmark")
     parser.add_argument(
         "--enable_mkldnn",
         type=ast.literal_eval,
         default=False,
         help="Whether use mkldnn with CPU.")
-    parser.add_argument(
-        "--enable_mkldnn_bfloat16",
-        type=ast.literal_eval,
-        default=False,
-        help="Whether use mkldnn bfloat16 inference with CPU.")
     parser.add_argument(
         "--cpu_threads", type=int, default=1, help="Num of threads with CPU.")
     parser.add_argument(
@@ -123,62 +109,20 @@ def argsparser():
         help="If the model is produced by TRT offline quantitative "
         "calibration, trt_calib_mode need to set True.")
-    parser.add_argument(
-        '--save_images',
-        action='store_true',
-        help='Save visualization image results.')
-    parser.add_argument(
-        '--save_mot_txts',
-        action='store_true',
-        help='Save tracking results (txt).')
-    parser.add_argument(
-        '--save_mot_txt_per_img',
-        action='store_true',
-        help='Save tracking results (txt) for each image.')
-    parser.add_argument(
-        '--scaled',
-        type=bool,
-        default=False,
-        help="Whether coords after detector outputs are scaled, False in JDE YOLOv3 "
-        "True in general detector.")
-    parser.add_argument(
-        "--tracker_config", type=str, default=None, help=("tracker donfig"))
-    parser.add_argument(
-        "--reid_model_dir",
-        type=str,
-        default=None,
-        help=("Directory include:'model.pdiparams', 'model.pdmodel', "
-              "'infer_cfg.yml', created by tools/export_model.py."))
-    parser.add_argument(
-        "--reid_batch_size",
-        type=int,
-        default=50,
-        help="max batch_size for reid model inference.")
-    parser.add_argument(
-        '--use_dark',
-        type=ast.literal_eval,
-        default=True,
-        help='whether to use darkpose to get better keypoint position predict ')
-    parser.add_argument(
-        "--action_file",
-        type=str,
-        default=None,
-        help="Path of input file for action recognition.")
-    parser.add_argument(
-        "--window_size",
-        type=int,
-        default=50,
-        help="Temporal size of skeleton feature for action recognition.")
-    parser.add_argument(
-        "--random_pad",
-        type=ast.literal_eval,
-        default=False,
-        help="Whether do random padding for action recognition.")
-    parser.add_argument(
-        "--save_results",
-        type=bool,
-        default=False,
-        help="Whether save detection result to file using coco format")
+    parser.add_argument(
+        "--do_entrance_counting",
+        action='store_true',
+        help="Whether counting the numbers of identifiers entering "
+        "or getting out from the entrance. Note that only support one-class"
+        "counting, multi-class counting is coming soon.")
+    parser.add_argument(
+        "--secs_interval",
+        type=int,
+        default=2,
+        help="The seconds interval to count after tracking")
+    parser.add_argument(
+        "--draw_center_traj",
+        action='store_true',
+        help="Whether drawing the trajectory of center")
     return parser
@@ -208,6 +152,8 @@ def create_predictor(args, mode):
     config = inference.Config(model_file_path, params_file_path)
+    batch_size = 1
     if args.device == "GPU":
         gpu_id = get_infer_gpuid()
         if gpu_id is None:
@@ -299,12 +245,12 @@ def create_predictor(args, mode):
         elif mode == "rec":
             imgH = int(args.rec_image_shape.split(',')[-2])
             min_input_shape = {"x": [1, 3, imgH, 10]}
-            max_input_shape = {"x": [args.batch_size, 3, imgH, 2304]}
-            opt_input_shape = {"x": [args.batch_size, 3, imgH, 320]}
+            max_input_shape = {"x": [batch_size, 3, imgH, 2304]}
+            opt_input_shape = {"x": [batch_size, 3, imgH, 320]}
         elif mode == "cls":
             min_input_shape = {"x": [1, 3, 48, 10]}
-            max_input_shape = {"x": [args.batch_size, 3, 48, 1024]}
-            opt_input_shape = {"x": [args.batch_size, 3, 48, 320]}
+            max_input_shape = {"x": [batch_size, 3, 48, 1024]}
+            opt_input_shape = {"x": [batch_size, 3, 48, 320]}
         else:
             use_dynamic_shape = False
         if use_dynamic_shape:
......
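One behavioral consequence of the hunks above: the commit drops the --batch_size flag and pins batch_size = 1 inside create_predictor, so the TensorRT dynamic-shape ranges for the "rec" and "cls" branches now assume single-image batches. As a minimal sketch (not the repo's exact code) of how those shapes are applied with the Paddle Inference API, where set_trt_dynamic_shape_info is the real Config method and the config is assumed to already have TensorRT enabled via enable_tensorrt_engine:

def set_rec_dynamic_shapes(config, rec_image_shape="3, 48, 320", batch_size=1):
    # Height comes from the "C, H, W" string, exactly as in the hunk above.
    imgH = int(rec_image_shape.split(',')[-2])
    # Width may vary from 10 px up to 2304 px; 320 is the optimization point.
    min_input_shape = {"x": [1, 3, imgH, 10]}
    max_input_shape = {"x": [batch_size, 3, imgH, 2304]}
    opt_input_shape = {"x": [batch_size, 3, imgH, 320]}
    config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,
                                      opt_input_shape)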
@@ -27,14 +27,6 @@ def argsparser():
         help=("Directory include:'model.pdiparams', 'model.pdmodel', "
               "'infer_cfg.yml', created by tools/export_model.py."),
         required=True)
-    parser.add_argument("--det_algorithm", type=str, default='DB')
-    parser.add_argument("--det_model_dir", type=str)
-    parser.add_argument("--det_limit_side_len", type=float, default=960)
-    parser.add_argument("--det_limit_type", type=str, default='max')
-    parser.add_argument("--rec_algorithm", type=str, default='SVTR_LCNet')
-    parser.add_argument("--rec_model_dir", type=str)
-    parser.add_argument("--rec_image_shape", type=str, default="3, 48, 320")
-    parser.add_argument("--rec_batch_num", type=int, default=6)
     parser.add_argument(
         "--image_file", type=str, default=None, help="Path of image file.")
     parser.add_argument(
......