From e5137cb275391afd88ccdd2bf86022a4ba467ef4 Mon Sep 17 00:00:00 2001
From: wangguanzhong
Date: Tue, 7 Apr 2020 20:17:19 +0800
Subject: [PATCH] fix default value of bufsize (#380)

* add devices_num on reader

* refine capacity value

* refine bufsize
---
 configs/dcn/yolov3_enhance_reader.yml                   |  4 ++--
 configs/ssd/ssd_mobilenet_v1_voc.yml                    |  2 +-
 configs/ssd/ssd_vgg16_300.yml                           |  4 ++--
 configs/ssd/ssd_vgg16_300_voc.yml                       |  4 ++--
 configs/ssd/ssd_vgg16_512.yml                           |  4 ++--
 configs/ssd/ssd_vgg16_512_voc.yml                       |  4 ++--
 configs/yolov3_reader.yml                               |  4 ++--
 ppdet/data/reader.py                                    | 10 +++++++---
 ppdet/modeling/architectures/blazeface.py               |  2 +-
 ppdet/modeling/architectures/cascade_mask_rcnn.py       |  2 +-
 ppdet/modeling/architectures/cascade_rcnn.py            |  2 +-
 ppdet/modeling/architectures/cascade_rcnn_cls_aware.py  |  2 +-
 ppdet/modeling/architectures/faceboxes.py               |  2 +-
 ppdet/modeling/architectures/faster_rcnn.py             |  2 +-
 ppdet/modeling/architectures/mask_rcnn.py               |  2 +-
 ppdet/modeling/architectures/retinanet.py               |  2 +-
 ppdet/modeling/architectures/ssd.py                     |  2 +-
 ppdet/modeling/architectures/yolov3.py                  |  2 +-
 tools/eval.py                                           |  2 +-
 tools/infer.py                                          |  2 +-
 tools/train.py                                          |  8 +++++---
 21 files changed, 37 insertions(+), 31 deletions(-)

diff --git a/configs/dcn/yolov3_enhance_reader.yml b/configs/dcn/yolov3_enhance_reader.yml
index 228e5558a..6616d6b2a 100644
--- a/configs/dcn/yolov3_enhance_reader.yml
+++ b/configs/dcn/yolov3_enhance_reader.yml
@@ -44,7 +44,7 @@ TrainReader:
   shuffle: true
   drop_last: true
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   use_process: true
 
 EvalReader:
@@ -76,7 +76,7 @@ EvalReader:
   batch_size: 8
   drop_empty: false
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
 
 TestReader:
   inputs_def:
diff --git a/configs/ssd/ssd_mobilenet_v1_voc.yml b/configs/ssd/ssd_mobilenet_v1_voc.yml
index 6d727adfb..ec3c5a21c 100644
--- a/configs/ssd/ssd_mobilenet_v1_voc.yml
+++ b/configs/ssd/ssd_mobilenet_v1_voc.yml
@@ -116,7 +116,7 @@ EvalReader:
     std: [127.502231, 127.502231, 127.502231]
   batch_size: 32
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   use_process: false
 
 TestReader:
diff --git a/configs/ssd/ssd_vgg16_300.yml b/configs/ssd/ssd_vgg16_300.yml
index fed09072d..aaacf022d 100644
--- a/configs/ssd/ssd_vgg16_300.yml
+++ b/configs/ssd/ssd_vgg16_300.yml
@@ -93,7 +93,7 @@ TrainReader:
   batch_size: 8
   shuffle: true
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   use_process: true
   drop_empty: true
 
@@ -123,7 +123,7 @@ EvalReader:
     std: [1, 1, 1]
   batch_size: 16
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
 
 TestReader:
   inputs_def:
diff --git a/configs/ssd/ssd_vgg16_300_voc.yml b/configs/ssd/ssd_vgg16_300_voc.yml
index 18d6bc3b8..b74896688 100644
--- a/configs/ssd/ssd_vgg16_300_voc.yml
+++ b/configs/ssd/ssd_vgg16_300_voc.yml
@@ -93,7 +93,7 @@ TrainReader:
   batch_size: 8
   shuffle: true
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   use_process: true
 
 EvalReader:
@@ -122,7 +122,7 @@ EvalReader:
     std: [1, 1, 1]
   batch_size: 32
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
 
 TestReader:
   inputs_def:
diff --git a/configs/ssd/ssd_vgg16_512.yml b/configs/ssd/ssd_vgg16_512.yml
index 1383acf42..1ddea5570 100644
--- a/configs/ssd/ssd_vgg16_512.yml
+++ b/configs/ssd/ssd_vgg16_512.yml
@@ -96,7 +96,7 @@ TrainReader:
   batch_size: 8
   shuffle: true
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   use_process: true
 
 EvalReader:
@@ -124,7 +124,7 @@ EvalReader:
     std: [1, 1, 1]
   batch_size: 8
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   drop_empty: false
 
 TestReader:
diff --git a/configs/ssd/ssd_vgg16_512_voc.yml b/configs/ssd/ssd_vgg16_512_voc.yml
index c3224129f..35fd19094 100644
--- a/configs/ssd/ssd_vgg16_512_voc.yml
+++ b/configs/ssd/ssd_vgg16_512_voc.yml
@@ -97,7 +97,7 @@ TrainReader:
   batch_size: 8
   shuffle: true
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   use_process: true
 
 EvalReader:
@@ -126,7 +126,7 @@ EvalReader:
     std: [1, 1, 1]
   batch_size: 32
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
 
 TestReader:
   inputs_def:
diff --git a/configs/yolov3_reader.yml b/configs/yolov3_reader.yml
index e539408d1..2a8463f1e 100644
--- a/configs/yolov3_reader.yml
+++ b/configs/yolov3_reader.yml
@@ -51,7 +51,7 @@ TrainReader:
   mixup_epoch: 250
   drop_last: true
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
   use_process: true
 
 
@@ -84,7 +84,7 @@ EvalReader:
   batch_size: 8
   drop_empty: false
   worker_num: 8
-  bufsize: 32
+  bufsize: 16
 
 TestReader:
   inputs_def:
diff --git a/ppdet/data/reader.py b/ppdet/data/reader.py
index 04eb749ed..b8608fbaf 100644
--- a/ppdet/data/reader.py
+++ b/ppdet/data/reader.py
@@ -179,6 +179,7 @@ class Reader(object):
             use_process is true. Default 3G.
         inputs_def (dict): network input definition use to get input fields,
             which is used to determine the order of returned data.
+        devices_num (int): number of devices.
     """
 
     def __init__(self,
@@ -195,9 +196,10 @@ class Reader(object):
                  use_process=False,
                  use_fine_grained_loss=False,
                  num_classes=80,
-                 bufsize=100,
+                 bufsize=-1,
                  memsize='3G',
-                 inputs_def=None):
+                 inputs_def=None,
+                 devices_num=1):
         self._dataset = dataset
         self._roidbs = self._dataset.get_roidb()
         self._fields = copy.deepcopy(inputs_def[
@@ -256,6 +258,7 @@ class Reader(object):
         self._parallel = None
         if self._worker_num > -1:
             task = functools.partial(self.worker, self._drop_empty)
+            bufsize = devices_num * 2 if bufsize == -1 else bufsize
             self._parallel = ParallelMap(self, task, worker_num, bufsize,
                                          use_process, memsize)
 
@@ -388,7 +391,7 @@ class Reader(object):
             self._parallel.stop()
 
 
-def create_reader(cfg, max_iter=0, global_cfg=None):
+def create_reader(cfg, max_iter=0, global_cfg=None, devices_num=1):
     """
     Return iterable data reader.
 
@@ -403,6 +406,7 @@ def create_reader(cfg, max_iter=0, global_cfg=None):
         cfg['use_fine_grained_loss'] = getattr(global_cfg,
                                                'use_fine_grained_loss', False)
         cfg['num_classes'] = getattr(global_cfg, 'num_classes', 80)
+    cfg['devices_num'] = devices_num
     reader = Reader(**cfg)()
 
     def _reader():
diff --git a/ppdet/modeling/architectures/blazeface.py b/ppdet/modeling/architectures/blazeface.py
index b8a46535e..245a43835 100644
--- a/ppdet/modeling/architectures/blazeface.py
+++ b/ppdet/modeling/architectures/blazeface.py
@@ -198,7 +198,7 @@ class BlazeFace(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/cascade_mask_rcnn.py b/ppdet/modeling/architectures/cascade_mask_rcnn.py
index c99cfd1c1..30180ddac 100644
--- a/ppdet/modeling/architectures/cascade_mask_rcnn.py
+++ b/ppdet/modeling/architectures/cascade_mask_rcnn.py
@@ -421,7 +421,7 @@ class CascadeMaskRCNN(object):
         use_dataloader = use_dataloader and not mask_branch
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/cascade_rcnn.py b/ppdet/modeling/architectures/cascade_rcnn.py
index 60e15ad1c..1e27888cf 100644
--- a/ppdet/modeling/architectures/cascade_rcnn.py
+++ b/ppdet/modeling/architectures/cascade_rcnn.py
@@ -318,7 +318,7 @@ class CascadeRCNN(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/cascade_rcnn_cls_aware.py b/ppdet/modeling/architectures/cascade_rcnn_cls_aware.py
index 364007ba8..1af96e38d 100644
--- a/ppdet/modeling/architectures/cascade_rcnn_cls_aware.py
+++ b/ppdet/modeling/architectures/cascade_rcnn_cls_aware.py
@@ -301,7 +301,7 @@ class CascadeRCNNClsAware(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/faceboxes.py b/ppdet/modeling/architectures/faceboxes.py
index ee7292017..f5b120ece 100644
--- a/ppdet/modeling/architectures/faceboxes.py
+++ b/ppdet/modeling/architectures/faceboxes.py
@@ -172,7 +172,7 @@ class FaceBoxes(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/faster_rcnn.py b/ppdet/modeling/architectures/faster_rcnn.py
index 239af6078..1b22ad67f 100644
--- a/ppdet/modeling/architectures/faster_rcnn.py
+++ b/ppdet/modeling/architectures/faster_rcnn.py
@@ -231,7 +231,7 @@ class FasterRCNN(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/mask_rcnn.py b/ppdet/modeling/architectures/mask_rcnn.py
index 558190009..1f3f0104a 100644
--- a/ppdet/modeling/architectures/mask_rcnn.py
+++ b/ppdet/modeling/architectures/mask_rcnn.py
@@ -324,7 +324,7 @@ class MaskRCNN(object):
         use_dataloader = use_dataloader and not mask_branch
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/retinanet.py b/ppdet/modeling/architectures/retinanet.py
index 936a4ba4f..c541bc2b2 100644
--- a/ppdet/modeling/architectures/retinanet.py
+++ b/ppdet/modeling/architectures/retinanet.py
@@ -114,7 +114,7 @@ class RetinaNet(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/ssd.py b/ppdet/modeling/architectures/ssd.py
index 70aaf7319..5e082479d 100644
--- a/ppdet/modeling/architectures/ssd.py
+++ b/ppdet/modeling/architectures/ssd.py
@@ -118,7 +118,7 @@ class SSD(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/ppdet/modeling/architectures/yolov3.py b/ppdet/modeling/architectures/yolov3.py
index cff0dec6d..52bb41932 100644
--- a/ppdet/modeling/architectures/yolov3.py
+++ b/ppdet/modeling/architectures/yolov3.py
@@ -147,7 +147,7 @@ class YOLOv3(object):
             lod_level=inputs_def[key]['lod_level'])) for key in fields])
         loader = fluid.io.DataLoader.from_generator(
             feed_list=list(feed_vars.values()),
-            capacity=64,
+            capacity=16,
             use_double_buffer=True,
             iterable=iterable) if use_dataloader else None
         return feed_vars, loader
diff --git a/tools/eval.py b/tools/eval.py
index 7eb3f18f3..2feb5150f 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -84,7 +84,7 @@ def main():
             fetches = model.eval(feed_vars, multi_scale_test)
     eval_prog = eval_prog.clone(True)
 
-    reader = create_reader(cfg.EvalReader)
+    reader = create_reader(cfg.EvalReader, devices_num=1)
     loader.set_sample_list_generator(reader, place)
 
     dataset = cfg['EvalReader']['dataset']
diff --git a/tools/infer.py b/tools/infer.py
index 3835264b2..a81413b82 100644
--- a/tools/infer.py
+++ b/tools/infer.py
@@ -130,7 +130,7 @@ def main():
             test_fetches = model.test(feed_vars)
     infer_prog = infer_prog.clone(True)
 
-    reader = create_reader(cfg.TestReader)
+    reader = create_reader(cfg.TestReader, devices_num=1)
     loader.set_sample_list_generator(reader, place)
 
     exe.run(startup_prog)
diff --git a/tools/train.py b/tools/train.py
index 846da330a..54af54ab8 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -144,7 +144,7 @@ def main():
                 fetches = model.eval(feed_vars)
         eval_prog = eval_prog.clone(True)
 
-        eval_reader = create_reader(cfg.EvalReader)
+        eval_reader = create_reader(cfg.EvalReader, devices_num=1)
         eval_loader.set_sample_list_generator(eval_reader, place)
 
         # parse eval fetches
@@ -200,8 +200,10 @@ def main():
             checkpoint.load_params(
                 exe, train_prog, cfg.pretrain_weights,
                 ignore_params=ignore_params)
-    train_reader = create_reader(cfg.TrainReader, (cfg.max_iters - start_iter) *
-                                 devices_num, cfg)
+    train_reader = create_reader(
+        cfg.TrainReader, (cfg.max_iters - start_iter) * devices_num,
+        cfg,
+        devices_num=devices_num)
     train_loader.set_sample_list_generator(train_reader, place)
 
     # whether output bbox is normalized in model output layer
-- 
GitLab
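
Note (not part of the patch): the behavioral core of this change sits in ppdet/data/reader.py. Reader now defaults bufsize to -1, and a default of -1 is resolved to devices_num * 2 just before ParallelMap is built; tools/train.py passes the actual device count, while tools/eval.py and tools/infer.py pass devices_num=1. The sketch below isolates that rule for illustration only; the helper name resolve_bufsize is mine and does not exist in the repo, since the patch inlines the expression inside Reader.__init__.

# Sketch only: resolve_bufsize is an illustrative stand-in for the inlined
# expression `bufsize = devices_num * 2 if bufsize == -1 else bufsize`
# added in Reader.__init__ before constructing ParallelMap.
def resolve_bufsize(bufsize, devices_num):
    """Return the effective prefetch buffer size for the parallel reader.

    bufsize == -1 (the new default) means "two batches per device"; any
    explicit value, e.g. `bufsize: 16` from the YAML configs, is kept as-is.
    """
    return devices_num * 2 if bufsize == -1 else bufsize


if __name__ == "__main__":
    print(resolve_bufsize(-1, 1))   # 2  -> eval/infer, which pass devices_num=1
    print(resolve_bufsize(-1, 8))   # 16 -> training with 8 devices
    print(resolve_bufsize(16, 8))   # 16 -> explicit config value wins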