Unverified commit 1089bcf6, authored by wangguanzhong, committed by GitHub

fix voc eval (#2731)

* fix voc eval

* remove debug code

* add comments

* update comment for collate_batch
Parent: 38a9b7c5
@@ -13,6 +13,12 @@ TrainReader:
   mixup_epoch: 350
   batch_size: 12
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
 epoch: 583
 LearningRate:
......
@@ -6,3 +6,9 @@ _BASE_: [
   '_base_/ssd_mobilenet_reader.yml',
 ]
 weights: output/ssd_mobilenet_v1_300_120e_voc/model_final
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
@@ -6,3 +6,9 @@ _BASE_: [
   '_base_/ssd_reader.yml',
 ]
 weights: output/ssd_vgg16_300_240e_voc/model_final
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
@@ -8,3 +8,9 @@ _BASE_: [
 snapshot_epoch: 5
 weights: output/yolov3_darknet53_270e_voc/model_final
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
@@ -9,6 +9,12 @@ _BASE_: [
 snapshot_epoch: 5
 weights: output/yolov3_mobilenet_v1_270e_voc/model_final
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
 LearningRate:
   base_lr: 0.001
   schedulers:
......
@@ -10,6 +10,12 @@ snapshot_epoch: 5
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV1_ssld_pretrained.pdparams
 weights: output/yolov3_mobilenet_v1_ssld_270e_voc/model_final
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
 LearningRate:
   base_lr: 0.001
   schedulers:
......
@@ -9,6 +9,12 @@ _BASE_: [
 snapshot_epoch: 5
 weights: output/yolov3_mobilenet_v3_large_270e_voc/model_final
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
 LearningRate:
   base_lr: 0.001
   schedulers:
......
@@ -10,6 +10,12 @@ snapshot_epoch: 5
 pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV3_large_x1_0_ssld_pretrained.pdparams
 weights: output/yolov3_mobilenet_v3_large_ssld_270e_voc/model_final
+# set collate_batch to false because ground-truth info is needed
+# on voc dataset and should not collate data in batch when batch size
+# is larger than 1.
+EvalReader:
+  collate_batch: false
 LearningRate:
   base_lr: 0.001
   schedulers:
......
@@ -95,10 +95,9 @@ class BatchCompose(Compose):
                 tmp_data = []
                 for i in range(len(data)):
                     tmp_data.append(data[i][k])
-                if not 'gt_' in k and not 'is_crowd' in k:
+                if not 'gt_' in k and not 'is_crowd' in k and not 'difficult' in k:
                     tmp_data = np.stack(tmp_data, axis=0)
                 batch_data[k] = tmp_data
         return batch_data
@@ -118,6 +117,11 @@ class BaseDataLoader(object):
         drop_empty (bool): whether to drop samples with no ground
             truth labels, default True
         num_classes (int): class number of dataset, default 80
+        collate_batch (bool): whether to collate batch in dataloader.
+            If set to True, the samples will be collated into a batch
+            according to the batch size. Otherwise, the ground-truth
+            fields will not be collated, which is used when the number
+            of ground-truth boxes differs across samples.
         use_shared_memory (bool): whether to use shared memory to
             accelerate data loading, enable this only if you
             are sure that the shared memory size of your OS
......
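For context (not part of the diff), here is a minimal sketch of what the `collate_batch` behavior described above amounts to: fixed-shape fields such as the image can be stacked across a batch, but per-image ground-truth fields (`gt_*`, `is_crowd`, `difficult`) have different first dimensions, so `np.stack` would fail on them and they must be kept as a list. The `collate` helper and the sample data below are illustrative stand-ins, not the actual `BatchCompose` implementation.

```python
import numpy as np

# Two VOC samples with different numbers of ground-truth boxes (illustrative data).
samples = [
    {'image': np.zeros((3, 300, 300), 'float32'),
     'gt_bbox': np.array([[10, 10, 50, 50]], 'float32'),                     # 1 box
     'difficult': np.array([[0]], 'int32')},
    {'image': np.zeros((3, 300, 300), 'float32'),
     'gt_bbox': np.array([[5, 5, 20, 20], [30, 30, 90, 90]], 'float32'),     # 2 boxes
     'difficult': np.array([[0], [1]], 'int32')},
]

def collate(samples, collate_batch=False):
    """Toy collation: stack fixed-shape fields, keep ragged ground-truth
    fields ('gt_*', 'is_crowd', 'difficult') as per-image lists."""
    batch = {}
    for k in samples[0]:
        values = [s[k] for s in samples]
        ragged = ('gt_' in k) or ('is_crowd' in k) or ('difficult' in k)
        if collate_batch or not ragged:
            values = np.stack(values, axis=0)  # requires identical shapes
        batch[k] = values
    return batch

batch = collate(samples, collate_batch=False)
print(type(batch['gt_bbox']))   # list: one array per image, lengths 1 and 2
print(batch['image'].shape)     # (2, 3, 300, 300): images stack fine

# With collate_batch=True, np.stack on 'gt_bbox' raises ValueError because the
# per-image box counts (1 vs 2) give incompatible shapes, which is why VOC
# evaluation with batch size > 1 needs collate_batch: false.
```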
@@ -202,9 +202,9 @@ class VOCMetric(Metric):
         if bboxes.shape == (1, 1) or bboxes is None:
             return
-        gt_boxes = inputs['gt_bbox'].numpy()
-        gt_labels = inputs['gt_class'].numpy()
-        difficults = inputs['difficult'].numpy() if not self.evaluate_difficult \
+        gt_boxes = inputs['gt_bbox']
+        gt_labels = inputs['gt_class']
+        difficults = inputs['difficult'] if not self.evaluate_difficult \
             else None
         scale_factor = inputs['scale_factor'].numpy(
@@ -212,13 +212,13 @@ class VOCMetric(Metric):
             (gt_boxes.shape[0], 2)).astype('float32')
         bbox_idx = 0
-        for i in range(gt_boxes.shape[0]):
-            gt_box = gt_boxes[i]
+        for i in range(len(gt_boxes)):
+            gt_box = gt_boxes[i].numpy()
             h, w = scale_factor[i]
             gt_box = gt_box / np.array([w, h, w, h])
-            gt_label = gt_labels[i]
+            gt_label = gt_labels[i].numpy()
             difficult = None if difficults is None \
-                else difficults[i]
+                else difficults[i].numpy()
             bbox_num = bbox_lengths[i]
             bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
             score = scores[bbox_idx:bbox_idx + bbox_num]
......
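The metric change follows from the reader change: with `collate_batch: false`, `inputs['gt_bbox']`, `inputs['gt_class']` and `inputs['difficult']` arrive as lists of per-image tensors rather than one batched tensor, so `.numpy()` must be called per element inside the loop and `len()` replaces `.shape[0]`. A rough sketch of that access pattern, with hypothetical data standing in for real reader output:

```python
import paddle

# Hypothetical un-collated batch: one tensor per image, box counts differ.
inputs = {
    'gt_bbox': [paddle.to_tensor([[10., 10., 50., 50.]]),
                paddle.to_tensor([[5., 5., 20., 20.], [30., 30., 90., 90.]])],
    'gt_class': [paddle.to_tensor([[0]]), paddle.to_tensor([[1], [2]])],
    'difficult': [paddle.to_tensor([[0]]), paddle.to_tensor([[0], [1]])],
}

gt_boxes = inputs['gt_bbox']          # a Python list, so len() replaces .shape[0]
for i in range(len(gt_boxes)):
    gt_box = gt_boxes[i].numpy()      # convert each per-image tensor individually
    gt_label = inputs['gt_class'][i].numpy()
    difficult = inputs['difficult'][i].numpy()
    print(gt_box.shape, gt_label.ravel(), difficult.ravel())
```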