Unverified commit 07f3743d, authored by shangliang Xu, committed by GitHub

[TIPC] fix bug in train benchmark (#6509)

Parent 190e237b
......@@ -152,17 +152,11 @@ class Detector(object):
    def postprocess(self, inputs, result):
        # postprocess output of predictor
        np_boxes_num = result['boxes_num']
-        out_result = {k: [] for k, v in result.items() if v is not None}
-        idx = 0
-        for num_box in np_boxes_num:
-            for k, v in out_result.items():
-                v.append(result[k][idx:idx + num_box])
-            idx += num_box
-            if num_box == 0:
-                print('[WARNNING] No object detected.')
-        out_result = {k: np.concatenate(v) for k, v in out_result.items()}
-        out_result['boxes_num'] = result['boxes_num']
-        return out_result
+        if sum(np_boxes_num) <= 0:
+            print('[WARNNING] No object detected.')
+            result = {'boxes': np.zeros([0, 6]), 'boxes_num': [0]}
+        result = {k: v for k, v in result.items() if v is not None}
+        return result

    def filter_box(self, result, threshold):
        np_boxes_num = result['boxes_num']
......
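
For reference, a minimal runnable sketch of what the simplified postprocess now does: when no image in the batch produced a detection, it substitutes an empty (0, 6) box array so downstream code always receives a well-formed result dict. The standalone function name and the toy result dict here are illustrative only, not part of the repository.

import numpy as np

def postprocess_sketch(result):
    # If the whole batch has zero detections, fall back to an empty result.
    np_boxes_num = result['boxes_num']
    if sum(np_boxes_num) <= 0:
        print('[WARNNING] No object detected.')
        result = {'boxes': np.zeros([0, 6]), 'boxes_num': [0]}
    result = {k: v for k, v in result.items() if v is not None}
    return result

# Example: a batch of two images, neither with any detection.
print(postprocess_sketch({'boxes': np.zeros([0, 6]), 'boxes_num': [0, 0]}))
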
......@@ -39,6 +39,7 @@ class COCODataSet(DetDataset):
empty_ratio (float): the ratio of empty record number to total
record's, if empty_ratio is out of [0. ,1.), do not sample the
records and use all the empty entries. 1. as default
repeat (int): repeat times for dataset, used in benchmark.
"""
def __init__(self,
......@@ -49,9 +50,15 @@ class COCODataSet(DetDataset):
                 sample_num=-1,
                 load_crowd=False,
                 allow_empty=False,
-                 empty_ratio=1.):
-        super(COCODataSet, self).__init__(dataset_dir, image_dir, anno_path,
-                                          data_fields, sample_num)
+                 empty_ratio=1.,
+                 repeat=1):
+        super(COCODataSet, self).__init__(
+            dataset_dir,
+            image_dir,
+            anno_path,
+            data_fields,
+            sample_num,
+            repeat=repeat)
        self.load_image_only = False
        self.load_semantic = False
        self.load_crowd = load_crowd
......
......@@ -38,6 +38,7 @@ class DetDataset(Dataset):
data_fields (list): key name of data dictionary, at least have 'image'.
sample_num (int): number of samples to load, -1 means all.
use_default_label (bool): whether to load default label list.
repeat (int): repeat times for dataset, used in benchmark.
"""
def __init__(self,
......@@ -47,6 +48,7 @@ class DetDataset(Dataset):
data_fields=['image'],
sample_num=-1,
use_default_label=None,
repeat=1,
**kwargs):
super(DetDataset, self).__init__()
self.dataset_dir = dataset_dir if dataset_dir is not None else ''
......@@ -55,16 +57,19 @@ class DetDataset(Dataset):
        self.data_fields = data_fields
        self.sample_num = sample_num
        self.use_default_label = use_default_label
+        self.repeat = repeat
        self._epoch = 0
        self._curr_iter = 0

    def __len__(self, ):
-        return len(self.roidbs)
+        return len(self.roidbs) * self.repeat

    def __call__(self, *args, **kwargs):
        return self

    def __getitem__(self, idx):
+        if self.repeat > 1:
+            idx %= len(self.roidbs)
        # data batch
        roidb = copy.deepcopy(self.roidbs[idx])
        if self.mixup_epoch == 0 or self._epoch < self.mixup_epoch:
......
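
A minimal sketch of how the new repeat option interacts with __len__ and __getitem__: the reported length is scaled by repeat, and the index wraps modulo the original number of records, so one benchmark "epoch" walks the same data repeat times. The class name and toy records below are hypothetical; the real DetDataset builds self.roidbs in parse_dataset.

import copy

class RepeatedDataset:
    # Stand-in for DetDataset's repeat handling.
    def __init__(self, roidbs, repeat=1):
        self.roidbs = roidbs      # parsed records
        self.repeat = repeat      # repeat times, used in benchmark

    def __len__(self):
        return len(self.roidbs) * self.repeat

    def __getitem__(self, idx):
        if self.repeat > 1:
            idx %= len(self.roidbs)  # wrap back into the real sample range
        return copy.deepcopy(self.roidbs[idx])

ds = RepeatedDataset([{'im_id': 0}, {'im_id': 1}, {'im_id': 2}], repeat=4)
assert len(ds) == 12
assert ds[7] == {'im_id': 1}
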
......@@ -39,6 +39,7 @@ class MOTDataSet(DetDataset):
image_lists (str|list): mot data image lists, muiti-source mot dataset.
data_fields (list): key name of data dictionary, at least have 'image'.
sample_num (int): number of samples to load, -1 means all.
repeat (int): repeat times for dataset, used in benchmark.
Notes:
MOT datasets root directory following this:
......@@ -77,11 +78,13 @@ class MOTDataSet(DetDataset):
                 dataset_dir=None,
                 image_lists=[],
                 data_fields=['image'],
-                 sample_num=-1):
+                 sample_num=-1,
+                 repeat=1):
        super(MOTDataSet, self).__init__(
            dataset_dir=dataset_dir,
            data_fields=data_fields,
-            sample_num=sample_num)
+            sample_num=sample_num,
+            repeat=repeat)
self.dataset_dir = dataset_dir
self.image_lists = image_lists
if isinstance(self.image_lists, str):
......@@ -95,7 +98,8 @@ class MOTDataSet(DetDataset):
# only used to get categories and metric
# only check first data, but the label_list of all data should be same.
first_mot_data = self.image_lists[0].split('.')[0]
-        anno_file = os.path.join(self.dataset_dir, first_mot_data, 'label_list.txt')
+        anno_file = os.path.join(self.dataset_dir, first_mot_data,
+                                 'label_list.txt')
return anno_file
def parse_dataset(self):
......@@ -276,7 +280,8 @@ class MCMOTDataSet(DetDataset):
# only used to get categories and metric
# only check first data, but the label_list of all data should be same.
first_mot_data = self.image_lists[0].split('.')[0]
-        anno_file = os.path.join(self.dataset_dir, first_mot_data, 'label_list.txt')
+        anno_file = os.path.join(self.dataset_dir, first_mot_data,
+                                 'label_list.txt')
return anno_file
def parse_dataset(self):
......@@ -576,6 +581,7 @@ class MOTImageFolder(DetDataset):
def get_anno(self):
return self.anno_path
def _is_valid_video(f, extensions=('.mp4', '.avi', '.mov', '.rmvb', 'flv')):
return f.lower().endswith(extensions)
......
......@@ -46,6 +46,7 @@ class VOCDataSet(DetDataset):
empty_ratio (float): the ratio of empty record number to total
record's, if empty_ratio is out of [0. ,1.), do not sample the
records and use all the empty entries. 1. as default
repeat (int): repeat times for dataset, used in benchmark.
"""
def __init__(self,
......@@ -56,13 +57,15 @@ class VOCDataSet(DetDataset):
sample_num=-1,
label_list=None,
allow_empty=False,
-                 empty_ratio=1.):
+                 empty_ratio=1.,
+                 repeat=1):
        super(VOCDataSet, self).__init__(
            dataset_dir=dataset_dir,
            image_dir=image_dir,
            anno_path=anno_path,
            data_fields=data_fields,
-            sample_num=sample_num)
+            sample_num=sample_num,
+            repeat=repeat)
self.label_list = label_list
self.allow_empty = allow_empty
self.empty_ratio = empty_ratio
......
......@@ -83,6 +83,9 @@ line_num=`expr $line_num + 1`
fp_items=$(func_parser_value "${lines[line_num]}")
line_num=`expr $line_num + 1`
epoch=$(func_parser_value "${lines[line_num]}")
eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/coco_detection.yml"
eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/coco_instance.yml"
eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/mot.yml"
line_num=`expr $line_num + 1`
profile_option_key=$(func_parser_key "${lines[line_num]}")
......@@ -157,7 +160,7 @@ for batch_size in ${batch_size_list[*]}; do
# sed batchsize and precision
func_sed_params "$FILENAME" "${line_precision}" "$precision"
func_sed_params "$FILENAME" "${line_batchsize}" "$MODE=$batch_size"
func_sed_params "$FILENAME" "${line_epoch}" "$MODE=$epoch"
func_sed_params "$FILENAME" "${line_epoch}" "$MODE=1"
gpu_id=$(set_gpu_id $device_num)
if [ ${#gpu_id} -le 1 ];then
......
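
The three sed lines above splice a "repeat: ${epoch}" entry into the dataset configs, and the later change pins the trainer's epoch count to 1, so the value parsed as epoch is spent repeating the dataset rather than running extra training epochs. Below is a hedged Python equivalent of one of those sed calls; the file list mirrors the script, while the indentation and the assumption that line 10 of each YAML falls inside its TrainDataset block are mine, not confirmed by the source.

def insert_repeat(yaml_path, repeat):
    # Rough equivalent of: sed -i '10i\ repeat: <repeat>' <yaml_path>
    with open(yaml_path) as f:
        lines = f.readlines()
    lines.insert(9, '  repeat: {}\n'.format(repeat))  # before 1-based line 10
    with open(yaml_path, 'w') as f:
        f.writelines(lines)

for cfg in ('configs/datasets/coco_detection.yml',
            'configs/datasets/coco_instance.yml',
            'configs/datasets/mot.yml'):
    insert_repeat(cfg, repeat=4)  # 4 stands in for the parsed ${epoch} value
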
......@@ -51,3 +51,9 @@ inference:./deploy/python/keypoint_infer.py
null:null
===========================infer_benchmark_params===========================
random_infer_input:[{float32,[3,128,96]}]
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x416x416_2.npy
===========================train_benchmark_params==========================
batch_size:80
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -6,23 +6,25 @@ filename:null
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
norm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml -o
-quant_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o
-fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o
+quant_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_pact -o
+fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_fpgm -o
distill_export:null
export1:null
export2:null
-kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o
+export_param:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/yolov3_darknet53_270e_coco.yml --slim_config _template_kl_quant -o
##
-2onnx: paddle2onnx
---model_dir:./output_inference/picodet_s_320_coco_lcnet/
+infer_mode:norm
+infer_quant:False
+cmd:paddle2onnx
+--model_dir:null
--model_filename:model.pdmodel
--params_filename:model.pdiparams
---save_file:./deploy/third_engine/demo_onnxruntime/onnx_file/picodet_s_320_coco.onnx
+--save_file:model.onnx
--opset_version:11
-##
-inference:infer_demo.py
---modelpath:./onnx_file/picodet_s_320_coco.onnx
---img_fold:./imgs
---result_fold:results
-infer_mode:norm
-null:null
\ No newline at end of file
+--enable_onnx_checker:True
+paddle2onnx_param1:null
+infer_py:./deploy/third_engine/onnx/infer.py
+--infer_cfg:null
+--onnx_file:null
+--image_file:./demo/000000014439.jpg
+infer_param1:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320_2.npy
===========================train_benchmark_params==========================
batch_size:128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320_2.npy
===========================train_benchmark_params==========================
batch_size:128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320.npy
===========================train_benchmark_params==========================
batch_size:24
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x608x608.npy
===========================train_benchmark_params==========================
batch_size:24
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320.npy
===========================train_benchmark_params==========================
batch_size:32
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x640x640.npy
===========================train_benchmark_params==========================
batch_size:12
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
......@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
--trt_max_shape:1600
===========================infer_benchmark_params===========================
numpy_infer_input:3x640x640.npy
===========================train_benchmark_params==========================
batch_size:32
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file