diff --git a/configs/yolov3_darknet.yml b/configs/yolov3_darknet.yml
index fe7ad04a967eb34ae3e5f3953c344455a8d60a6f..9a1c243b8b245c4d0cb60e6ea9f57778e896d263 100644
--- a/configs/yolov3_darknet.yml
+++ b/configs/yolov3_darknet.yml
@@ -69,6 +69,7 @@ YoloTrainFeed:
 
 YoloEvalFeed:
   batch_size: 8
+  image_shape: [3, 608, 608]
   dataset:
     dataset_dir: dataset/coco
     annotation: annotations/instances_val2017.json
@@ -76,5 +77,6 @@ YoloEvalFeed:
 
 YoloTestFeed:
   batch_size: 1
+  image_shape: [3, 608, 608]
   dataset:
     annotation: dataset/coco/annotations/instances_val2017.json
diff --git a/configs/yolov3_darknet_voc.yml b/configs/yolov3_darknet_voc.yml
index 8b848d778b431de13226d97a37a19757b63b9b7d..08e00a533e04ab4e996086da69dee2dd191c21ee 100644
--- a/configs/yolov3_darknet_voc.yml
+++ b/configs/yolov3_darknet_voc.yml
@@ -71,6 +71,7 @@ YoloTrainFeed:
 
 YoloEvalFeed:
   batch_size: 8
+  image_shape: [3, 608, 608]
   dataset:
     dataset_dir: dataset/voc
     annotation: VOCdevkit/VOC_all/ImageSets/Main/val.txt
@@ -79,5 +80,6 @@ YoloEvalFeed:
 
 YoloTestFeed:
   batch_size: 1
+  image_shape: [3, 608, 608]
   dataset:
     use_default_label: true
diff --git a/configs/yolov3_mobilenet_v1.yml b/configs/yolov3_mobilenet_v1.yml
index 54f40e7d8f56e3622a13af8755ae5aaf3e347147..3e622025b587b56c6e79dd3a1cf1cbba00901406 100644
--- a/configs/yolov3_mobilenet_v1.yml
+++ b/configs/yolov3_mobilenet_v1.yml
@@ -70,6 +70,7 @@ YoloTrainFeed:
 
 YoloEvalFeed:
   batch_size: 8
+  image_shape: [3, 608, 608]
   dataset:
     dataset_dir: dataset/coco
     annotation: annotations/instances_val2017.json
@@ -77,5 +78,6 @@ YoloEvalFeed:
 
 YoloTestFeed:
   batch_size: 1
+  image_shape: [3, 608, 608]
   dataset:
     annotation: dataset/coco/annotations/instances_val2017.json
diff --git a/configs/yolov3_mobilenet_v1_voc.yml b/configs/yolov3_mobilenet_v1_voc.yml
index 24945c57ac577b8ae6162b904e782c13bc2a558c..4601f68f33c0eb0c84604380595e965f2f9a5ffb 100644
--- a/configs/yolov3_mobilenet_v1_voc.yml
+++ b/configs/yolov3_mobilenet_v1_voc.yml
@@ -72,6 +72,7 @@ YoloTrainFeed:
 
 YoloEvalFeed:
   batch_size: 8
+  image_shape: [3, 608, 608]
   dataset:
     dataset_dir: dataset/voc
     annotation: VOCdevkit/VOC_all/ImageSets/Main/val.txt
@@ -80,5 +81,6 @@ YoloEvalFeed:
 
 YoloTestFeed:
   batch_size: 1
+  image_shape: [3, 608, 608]
   dataset:
     use_default_label: true
diff --git a/configs/yolov3_r34.yml b/configs/yolov3_r34.yml
index e7895318de1506c71d2e3836f3274ef16de58fe1..e864f8fd92d2f671c442f3beecd8344171952e48 100644
--- a/configs/yolov3_r34.yml
+++ b/configs/yolov3_r34.yml
@@ -72,6 +72,7 @@ YoloTrainFeed:
 
 YoloEvalFeed:
   batch_size: 8
+  image_shape: [3, 608, 608]
   dataset:
     dataset_dir: dataset/coco
     annotation: annotations/instances_val2017.json
@@ -79,5 +80,6 @@ YoloEvalFeed:
 
 YoloTestFeed:
   batch_size: 1
+  image_shape: [3, 608, 608]
   dataset:
     annotation: dataset/coco/annotations/instances_val2017.json
diff --git a/configs/yolov3_r34_voc.yml b/configs/yolov3_r34_voc.yml
index fade2a300928195c1b1010010031776c709438ae..cb8e370bba96c942716de208a8100944cab454c6 100644
--- a/configs/yolov3_r34_voc.yml
+++ b/configs/yolov3_r34_voc.yml
@@ -74,6 +74,7 @@ YoloTrainFeed:
 
 YoloEvalFeed:
   batch_size: 8
+  image_shape: [3, 608, 608]
   dataset:
     dataset_dir: dataset/voc
     annotation: VOCdevkit/VOC_all/ImageSets/Main/val.txt
@@ -82,5 +83,6 @@ YoloEvalFeed:
 
 YoloTestFeed:
   batch_size: 1
+  image_shape: [3, 608, 608]
   dataset:
     use_default_label: true
diff --git a/ppdet/data/data_feed.py b/ppdet/data/data_feed.py
index 9c0357c34b287a93b11e95806f2c37b62e46060a..0d6629c1f7d3dcc8ae8374d02a172cdced70704e 100644
--- a/ppdet/data/data_feed.py
+++ b/ppdet/data/data_feed.py
@@ -942,6 +942,13 @@ class YoloEvalFeed(DataFeed):
         self.mode = 'VAL'
         self.bufsize = 128
+        # support image shape config, resize image with image_shape
+        for i, trans in enumerate(sample_transforms):
+            if isinstance(trans, ResizeImage):
+                sample_transforms[i] = ResizeImage(
+                    target_size=self.image_shape[-1],
+                    interp=trans.interp)
+
 
 
 @register
 class YoloTestFeed(DataFeed):
@@ -988,4 +995,11 @@ class YoloTestFeed(DataFeed):
             use_process=use_process)
         self.mode = 'TEST'
         self.bufsize = 128
+
+        # support image shape config, resize image with image_shape
+        for i, trans in enumerate(sample_transforms):
+            if isinstance(trans, ResizeImage):
+                sample_transforms[i] = ResizeImage(
+                    target_size=self.image_shape[-1],
+                    interp=trans.interp)
 # yapf: enable
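
Not part of the patch itself: the snippet below is a minimal, standalone sketch of the transform-swap pattern the data_feed.py hunks introduce. ResizeImage and apply_image_shape here are hypothetical stand-ins, not the ppdet classes, reduced to the two attributes the patch relies on (target_size, interp). The point it illustrates is that a configured image_shape (for example [3, 608, 608] from the YAML hunks above) now overrides the default resize target for eval and test feeds while keeping the original interpolation mode.

# Hypothetical stand-in for the ppdet ResizeImage operator, reduced to the
# two attributes the patch reads back (target_size, interp).
class ResizeImage(object):
    def __init__(self, target_size=608, interp=2):
        self.target_size = target_size
        self.interp = interp


def apply_image_shape(sample_transforms, image_shape):
    """Rebuild any ResizeImage so its target_size follows image_shape[-1],
    mirroring what the patched YoloEvalFeed/YoloTestFeed do in __init__."""
    for i, trans in enumerate(sample_transforms):
        if isinstance(trans, ResizeImage):
            # only the target size changes; the interpolation mode is kept
            sample_transforms[i] = ResizeImage(
                target_size=image_shape[-1], interp=trans.interp)
    return sample_transforms


if __name__ == '__main__':
    transforms = [ResizeImage(target_size=416, interp=1)]
    transforms = apply_image_shape(transforms, image_shape=[3, 608, 608])
    assert transforms[0].target_size == 608 and transforms[0].interp == 1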