Unverified commit 07f3743d, authored by shangliang Xu, committed by GitHub

[TIPC] fix bug in train benchmark (#6509)

Parent 190e237b
@@ -152,17 +152,11 @@ class Detector(object):
    def postprocess(self, inputs, result):
        # postprocess output of predictor
        np_boxes_num = result['boxes_num']
-        out_result = {k: [] for k, v in result.items() if v is not None}
-        idx = 0
-        for num_box in np_boxes_num:
-            for k, v in out_result.items():
-                v.append(result[k][idx:idx + num_box])
-            idx += num_box
-            if num_box == 0:
-                print('[WARNNING] No object detected.')
-        out_result = {k: np.concatenate(v) for k, v in out_result.items()}
-        out_result['boxes_num'] = result['boxes_num']
-        return out_result
+        if sum(np_boxes_num) <= 0:
+            print('[WARNNING] No object detected.')
+            result = {'boxes': np.zeros([0, 6]), 'boxes_num': [0]}
+        result = {k: v for k, v in result.items() if v is not None}
+        return result

    def filter_box(self, result, threshold):
        np_boxes_num = result['boxes_num']
...
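The removed loop sliced and re-concatenated every output array per image even when nothing was detected; the new branch short-circuits the empty case. A minimal, self-contained sketch of the new behaviour (names mirror the diff, but this is not a drop-in replacement for `Detector.postprocess`):

```python
import numpy as np

def postprocess(result):
    # 'boxes_num' lists the detection count per image in the batch
    np_boxes_num = result['boxes_num']
    if sum(np_boxes_num) <= 0:
        print('[WARNNING] No object detected.')  # spelling kept as in the source
        result = {'boxes': np.zeros([0, 6]), 'boxes_num': [0]}
    # drop entries the predictor did not produce (e.g. masks for pure detectors)
    return {k: v for k, v in result.items() if v is not None}

empty = {'boxes': np.zeros([0, 6]), 'boxes_num': [0], 'masks': None}
print(postprocess(empty))  # -> {'boxes': array of shape (0, 6), 'boxes_num': [0]}
```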
@@ -39,6 +39,7 @@ class COCODataSet(DetDataset):
        empty_ratio (float): the ratio of empty record number to total
            record's, if empty_ratio is out of [0. ,1.), do not sample the
            records and use all the empty entries. 1. as default
+        repeat (int): repeat times for dataset, use in benchmark.
    """

    def __init__(self,
@@ -49,9 +50,15 @@ class COCODataSet(DetDataset):
                 sample_num=-1,
                 load_crowd=False,
                 allow_empty=False,
-                 empty_ratio=1.):
-        super(COCODataSet, self).__init__(dataset_dir, image_dir, anno_path,
-                                          data_fields, sample_num)
+                 empty_ratio=1.,
+                 repeat=1):
+        super(COCODataSet, self).__init__(
+            dataset_dir,
+            image_dir,
+            anno_path,
+            data_fields,
+            sample_num,
+            repeat=repeat)
        self.load_image_only = False
        self.load_semantic = False
        self.load_crowd = load_crowd
...
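A hedged usage sketch of the new keyword (the dataset paths below are placeholders, not taken from the commit): repeating a COCO training set lets one benchmark run iterate several "epochs" worth of batches without reloading annotations.

```python
from ppdet.data.source.coco import COCODataSet

# assumed local COCO layout; adjust paths to your environment
dataset = COCODataSet(
    dataset_dir='dataset/coco',
    image_dir='train2017',
    anno_path='annotations/instances_train2017.json',
    data_fields=['image', 'gt_bbox', 'gt_class', 'is_crowd'],
    repeat=4)            # new argument, forwarded to DetDataset
dataset.parse_dataset()
print(len(dataset))      # number of records * 4
```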
@@ -38,6 +38,7 @@ class DetDataset(Dataset):
        data_fields (list): key name of data dictionary, at least have 'image'.
        sample_num (int): number of samples to load, -1 means all.
        use_default_label (bool): whether to load default label list.
+        repeat (int): repeat times for dataset, use in benchmark.
    """

    def __init__(self,
@@ -47,6 +48,7 @@ class DetDataset(Dataset):
                 data_fields=['image'],
                 sample_num=-1,
                 use_default_label=None,
+                 repeat=1,
                 **kwargs):
        super(DetDataset, self).__init__()
        self.dataset_dir = dataset_dir if dataset_dir is not None else ''
@@ -55,16 +57,19 @@ class DetDataset(Dataset):
        self.data_fields = data_fields
        self.sample_num = sample_num
        self.use_default_label = use_default_label
+        self.repeat = repeat
        self._epoch = 0
        self._curr_iter = 0

    def __len__(self, ):
-        return len(self.roidbs)
+        return len(self.roidbs) * self.repeat

    def __call__(self, *args, **kwargs):
        return self

    def __getitem__(self, idx):
+        if self.repeat > 1:
+            idx %= self.repeat
        # data batch
        roidb = copy.deepcopy(self.roidbs[idx])
        if self.mixup_epoch == 0 or self._epoch < self.mixup_epoch:
...
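A minimal toy reproduction of the repeat mechanics added above (not the real DetDataset; the wrap below uses the record count, whereas the patch itself wraps with `idx %= self.repeat`):

```python
import copy

class ToyDataset:
    def __init__(self, records, repeat=1):
        self.roidbs = records
        self.repeat = repeat

    def __len__(self):
        # advertise repeat-times more samples to the loader
        return len(self.roidbs) * self.repeat

    def __getitem__(self, idx):
        # fold the enlarged index back into the real record range
        if self.repeat > 1:
            idx %= len(self.roidbs)
        return copy.deepcopy(self.roidbs[idx])

ds = ToyDataset([{'im_id': i} for i in range(3)], repeat=2)
print(len(ds))   # 6
print(ds[5])     # {'im_id': 2}
```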
@@ -39,6 +39,7 @@ class MOTDataSet(DetDataset):
        image_lists (str|list): mot data image lists, muiti-source mot dataset.
        data_fields (list): key name of data dictionary, at least have 'image'.
        sample_num (int): number of samples to load, -1 means all.
+        repeat (int): repeat times for dataset, use in benchmark.

    Notes:
        MOT datasets root directory following this:
@@ -77,11 +78,13 @@ class MOTDataSet(DetDataset):
                 dataset_dir=None,
                 image_lists=[],
                 data_fields=['image'],
-                 sample_num=-1):
+                 sample_num=-1,
+                 repeat=1):
        super(MOTDataSet, self).__init__(
            dataset_dir=dataset_dir,
            data_fields=data_fields,
-            sample_num=sample_num)
+            sample_num=sample_num,
+            repeat=repeat)
        self.dataset_dir = dataset_dir
        self.image_lists = image_lists
        if isinstance(self.image_lists, str):
@@ -95,7 +98,8 @@ class MOTDataSet(DetDataset):
        # only used to get categories and metric
        # only check first data, but the label_list of all data should be same.
        first_mot_data = self.image_lists[0].split('.')[0]
-        anno_file = os.path.join(self.dataset_dir, first_mot_data, 'label_list.txt')
+        anno_file = os.path.join(self.dataset_dir, first_mot_data,
+                                 'label_list.txt')
        return anno_file

    def parse_dataset(self):
@@ -276,7 +280,8 @@ class MCMOTDataSet(DetDataset):
        # only used to get categories and metric
        # only check first data, but the label_list of all data should be same.
        first_mot_data = self.image_lists[0].split('.')[0]
-        anno_file = os.path.join(self.dataset_dir, first_mot_data, 'label_list.txt')
+        anno_file = os.path.join(self.dataset_dir, first_mot_data,
+                                 'label_list.txt')
        return anno_file

    def parse_dataset(self):
@@ -576,6 +581,7 @@ class MOTImageFolder(DetDataset):
    def get_anno(self):
        return self.anno_path

def _is_valid_video(f, extensions=('.mp4', '.avi', '.mov', '.rmvb', 'flv')):
    return f.lower().endswith(extensions)
...
@@ -46,6 +46,7 @@ class VOCDataSet(DetDataset):
        empty_ratio (float): the ratio of empty record number to total
            record's, if empty_ratio is out of [0. ,1.), do not sample the
            records and use all the empty entries. 1. as default
+        repeat (int): repeat times for dataset, use in benchmark.
    """

    def __init__(self,
@@ -56,13 +57,15 @@ class VOCDataSet(DetDataset):
                 sample_num=-1,
                 label_list=None,
                 allow_empty=False,
-                 empty_ratio=1.):
+                 empty_ratio=1.,
+                 repeat=1):
        super(VOCDataSet, self).__init__(
            dataset_dir=dataset_dir,
            image_dir=image_dir,
            anno_path=anno_path,
            data_fields=data_fields,
-            sample_num=sample_num)
+            sample_num=sample_num,
+            repeat=repeat)
        self.label_list = label_list
        self.allow_empty = allow_empty
        self.empty_ratio = empty_ratio
...
@@ -83,6 +83,9 @@ line_num=`expr $line_num + 1`
fp_items=$(func_parser_value "${lines[line_num]}")
line_num=`expr $line_num + 1`
epoch=$(func_parser_value "${lines[line_num]}")
+eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/coco_detection.yml"
+eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/coco_instance.yml"
+eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/mot.yml"

line_num=`expr $line_num + 1`
profile_option_key=$(func_parser_key "${lines[line_num]}")
@@ -157,7 +160,7 @@ for batch_size in ${batch_size_list[*]}; do
        # sed batchsize and precision
        func_sed_params "$FILENAME" "${line_precision}" "$precision"
        func_sed_params "$FILENAME" "${line_batchsize}" "$MODE=$batch_size"
-        func_sed_params "$FILENAME" "${line_epoch}" "$MODE=$epoch"
+        func_sed_params "$FILENAME" "${line_epoch}" "$MODE=1"
        gpu_id=$(set_gpu_id $device_num)

        if [ ${#gpu_id} -le 1 ];then
...
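What the three `sed -i '10i\ ...'` calls accomplish, sketched in Python (an illustration of the effect, not code from the script): the parsed `epoch` value is written into the dataset configs as a `repeat:` factor, while `func_sed_params` pins the actual number of training epochs to 1, so the benchmark runs one long epoch instead of many short ones.

```python
from pathlib import Path

def insert_repeat(yml_path, repeat, line_no=10):
    """Insert a 'repeat: <repeat>' entry so it becomes line `line_no` of the YAML file."""
    lines = Path(yml_path).read_text().splitlines(keepends=True)
    # indented to sit inside the TrainDataset block (assumption about the config layout)
    lines.insert(line_no - 1, f"  repeat: {repeat}\n")
    Path(yml_path).write_text("".join(lines))

# hypothetical call; the script patches coco_detection.yml, coco_instance.yml and mot.yml
# insert_repeat("configs/datasets/coco_detection.yml", repeat=4)
```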
@@ -51,3 +51,9 @@ inference:./deploy/python/keypoint_infer.py
null:null
===========================infer_benchmark_params===========================
random_infer_input:[{float32,[3,128,96]}]
+===========================train_benchmark_params==========================
+batch_size:512
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
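Each `train_benchmark_params` block is a set of `key:value` lines read back by the shell helpers in benchmark_train.sh. A rough Python equivalent of that parsing (an assumption about the format, not code from the repo):

```python
params_block = """batch_size:512
fp_items:fp32|fp16
epoch:1
flags:null"""

# split each line on the first ':' only, so values like 'fp32|fp16' stay intact
parsed = dict(line.split(':', 1) for line in params_block.splitlines())
fp_items = parsed['fp_items'].split('|')       # ['fp32', 'fp16']
batch_sizes = parsed['batch_size'].split('|')  # ['512']
print(parsed['epoch'], fp_items, batch_sizes)
```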
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x416x416_2.npy
+===========================train_benchmark_params==========================
+batch_size:80
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
@@ -6,23 +6,25 @@ filename:null
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
norm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml -o
-quant_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o
-fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o
+quant_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_pact -o
+fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_fpgm -o
distill_export:null
export1:null
-export2:null
-kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o
+export_param:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/yolov3_darknet53_270e_coco.yml --slim_config _template_kl_quant -o
##
-2onnx: paddle2onnx
---model_dir:./output_inference/picodet_s_320_coco_lcnet/
+infer_mode:norm
+infer_quant:False
+cmd:paddle2onnx
+--model_dir:null
--model_filename:model.pdmodel
--params_filename:model.pdiparams
---save_file:./deploy/third_engine/demo_onnxruntime/onnx_file/picodet_s_320_coco.onnx
+--save_file:model.onnx
--opset_version:11
-##
-inference:infer_demo.py
---modelpath:./onnx_file/picodet_s_320_coco.onnx
---img_fold:./imgs
---result_fold:results
-infer_mode:norm
-null:null
+--enable_onnx_checker:True
+paddle2onnx_param1:null
+infer_py:./deploy/third_engine/onnx/infer.py
+--infer_cfg:null
+--onnx_file:null
+--image_file:./demo/000000014439.jpg
+infer_param1:null
\ No newline at end of file
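The rewritten paddle2onnx section now carries generic flags plus a separate inference entry. Assembled by hand they correspond roughly to the command below; the exported model directory is a placeholder adapted from the old line, and this is an illustration rather than how test_tipc invokes it.

```python
import subprocess

cmd = [
    "paddle2onnx",
    "--model_dir", "./output_inference/picodet_s_320_coco_lcnet",  # assumed export dir
    "--model_filename", "model.pdmodel",
    "--params_filename", "model.pdiparams",
    "--save_file", "model.onnx",
    "--opset_version", "11",
    "--enable_onnx_checker", "True",
]
subprocess.run(cmd, check=True)  # ./deploy/third_engine/onnx/infer.py then consumes model.onnx
```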
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320_2.npy
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320_2.npy
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320.npy
+===========================train_benchmark_params==========================
+batch_size:24
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x608x608.npy
+===========================train_benchmark_params==========================
+batch_size:24
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x320x320.npy
+===========================train_benchmark_params==========================
+batch_size:32
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
null:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x640x640.npy
+===========================train_benchmark_params==========================
+batch_size:12
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file
@@ -51,3 +51,9 @@ inference:./deploy/python/infer.py
--trt_max_shape:1600
===========================infer_benchmark_params===========================
numpy_infer_input:3x640x640.npy
+===========================train_benchmark_params==========================
+batch_size:32
+fp_items:fp32|fp16
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
\ No newline at end of file