提交 0d56cdb2 编写于 作者: W wangguanzhong 提交者: GitHub

Revert "unify reader to dataloader (#3487)" (#3543)

This reverts commit b591cc133ca04dc1ed50631c8d24031bb97db18b.
上级 a8ae92aa
......@@ -38,7 +38,7 @@ feed_var_def = [
# yapf: enable
def create_feed(feed, iterable=False):
def create_feed(feed, use_pyreader=True):
image_shape = feed.image_shape
feed_var_map = {var['name']: var for var in feed_var_def}
feed_var_map['image'] = {
......@@ -66,9 +66,11 @@ def create_feed(feed, iterable=False):
dtype=feed_var_map[key]['dtype'],
lod_level=feed_var_map[key]['lod_level'])) for key in feed.fields])
loader = fluid.io.DataLoader.from_generator(
feed_list=list(feed_vars.values()),
capacity=64,
use_double_buffer=True,
iterable=iterable)
return loader, feed_vars
pyreader = None
if use_pyreader:
pyreader = fluid.io.PyReader(
feed_list=list(feed_vars.values()),
capacity=64,
use_double_buffer=True,
iterable=False)
return pyreader, feed_vars
......@@ -15,6 +15,7 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
......@@ -23,7 +24,7 @@ import paddle.fluid as fluid
import logging
logger = logging.getLogger(__name__)
__all__ = ['check_gpu', 'check_version']
__all__ = ['check_gpu']
def check_gpu(use_gpu):
......@@ -44,18 +45,3 @@ def check_gpu(use_gpu):
except Exception as e:
pass
def check_version():
    """Ensure the installed PaddlePaddle meets the minimum version.

    Logs an error message and exits the process with status 1 when the
    installed paddlepaddle is older than 1.6.0 (a suitable develop build
    also satisfies the check via ``fluid.require_version``).
    """
    # NOTE: the original code ended the last string literal with a stray
    # trailing backslash, turning the assignment into an explicit line
    # continuation that ran into the following blank line / `try:` —
    # a SyntaxError. The backslash is removed here.
    err = "PaddlePaddle version 1.6 or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code."

    try:
        # require_version raises when the installed version is too old.
        fluid.require_version('1.6.0')
    except Exception:
        logger.error(err)
        sys.exit(1)
......@@ -57,7 +57,7 @@ def parse_fetches(fetches, prog=None, extra_keys=None):
return keys, values, cls
def eval_run(exe, compile_program, loader, keys, values, cls):
def eval_run(exe, compile_program, pyreader, keys, values, cls):
"""
Run evaluation program, return program outputs.
"""
......@@ -75,7 +75,7 @@ def eval_run(exe, compile_program, loader, keys, values, cls):
has_bbox = 'bbox' in keys
try:
loader.start()
pyreader.start()
while True:
outs = exe.run(compile_program,
fetch_list=values,
......@@ -90,7 +90,7 @@ def eval_run(exe, compile_program, loader, keys, values, cls):
iter_id += 1
images_num += len(res['bbox'][1][0]) if has_bbox else 1
except (StopIteration, fluid.core.EOFException):
loader.reset()
pyreader.reset()
logger.info('Test finish iter {}'.format(iter_id))
end_time = time.time()
......
......@@ -35,7 +35,7 @@ import paddle.fluid as fluid
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.check import check_gpu, check_version
from ppdet.utils.check import check_gpu
from ppdet.modeling.model_input import create_feed
from ppdet.data.data_feed import create_reader
from ppdet.core.workspace import load_config, merge_config, create
......@@ -62,8 +62,6 @@ def main():
# check if set use_gpu=True in paddlepaddle cpu version
check_gpu(cfg.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
print_total_cfg(cfg)
if 'eval_feed' not in cfg:
......@@ -81,12 +79,12 @@ def main():
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
loader, feed_vars = create_feed(eval_feed)
pyreader, feed_vars = create_feed(eval_feed)
fetches = model.eval(feed_vars)
eval_prog = eval_prog.clone(True)
reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
loader.set_sample_list_generator(reader, place)
pyreader.decorate_sample_list_generator(reader, place)
# eval already exists json file
if FLAGS.json_eval:
......@@ -122,7 +120,7 @@ def main():
callable(model.is_bbox_normalized):
is_bbox_normalized = model.is_bbox_normalized()
results = eval_run(exe, compile_program, loader, keys, values, cls)
results = eval_run(exe, compile_program, pyreader, keys, values, cls)
# evaluation
resolution = None
......
......@@ -44,7 +44,7 @@ from ppdet.data.data_feed import create_reader
from ppdet.utils.eval_utils import parse_fetches
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu, check_version
from ppdet.utils.check import check_gpu
from ppdet.utils.visualizer import visualize_results
import ppdet.utils.checkpoint as checkpoint
......@@ -150,8 +150,6 @@ def main():
# check if set use_gpu=True in paddlepaddle cpu version
check_gpu(cfg.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
print_total_cfg(cfg)
if 'test_feed' not in cfg:
......@@ -171,12 +169,12 @@ def main():
infer_prog = fluid.Program()
with fluid.program_guard(infer_prog, startup_prog):
with fluid.unique_name.guard():
loader, feed_vars = create_feed(test_feed, iterable=True)
_, feed_vars = create_feed(test_feed, use_pyreader=False)
test_fetches = model.test(feed_vars)
infer_prog = infer_prog.clone(True)
reader = create_reader(test_feed)
loader.set_sample_list_generator(reader, place)
feeder = fluid.DataFeeder(place=place, feed_list=feed_vars.values())
exe.run(startup_prog)
if cfg.weights:
......@@ -223,9 +221,9 @@ def main():
tb_image_frame = 0 # each frame can display ten pictures at most.
imid2path = reader.imid2path
for iter_id, data in enumerate(loader()):
for iter_id, data in enumerate(reader()):
outs = exe.run(infer_prog,
feed=data,
feed=feeder.feed(data),
fetch_list=values,
return_numpy=False)
res = {
......
......@@ -46,7 +46,7 @@ from ppdet.utils import dist_utils
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
from ppdet.utils.stats import TrainingStats
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu, check_version
from ppdet.utils.check import check_gpu
import ppdet.utils.checkpoint as checkpoint
from ppdet.modeling.model_input import create_feed
......@@ -81,8 +81,6 @@ def main():
# check if set use_gpu=True in paddlepaddle cpu version
check_gpu(cfg.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
if not FLAGS.dist or trainer_id == 0:
print_total_cfg(cfg)
......@@ -118,7 +116,7 @@ def main():
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
model = create(main_arch)
train_loader, feed_vars = create_feed(train_feed)
train_pyreader, feed_vars = create_feed(train_feed)
with mixed_precision_context(FLAGS.loss_scale, FLAGS.fp16) as ctx:
train_fetches = model.train(feed_vars)
......@@ -141,12 +139,12 @@ def main():
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
model = create(main_arch)
eval_loader, feed_vars = create_feed(eval_feed)
eval_pyreader, feed_vars = create_feed(eval_feed)
fetches = model.eval(feed_vars)
eval_prog = eval_prog.clone(True)
eval_reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
eval_loader.set_sample_list_generator(eval_reader, place)
eval_pyreader.decorate_sample_list_generator(eval_reader, place)
# parse eval fetches
extra_keys = []
......@@ -201,7 +199,7 @@ def main():
train_reader = create_reader(train_feed, (cfg.max_iters - start_iter) *
devices_num, FLAGS.dataset_dir)
train_loader.set_sample_list_generator(train_reader, place)
train_pyreader.decorate_sample_list_generator(train_reader, place)
# whether output bbox is normalized in model output layer
is_bbox_normalized = False
......@@ -213,7 +211,7 @@ def main():
map_type = cfg.map_type if 'map_type' in cfg else '11point'
train_stats = TrainingStats(cfg.log_smooth_window, train_keys)
train_loader.start()
train_pyreader.start()
start_time = time.time()
end_time = time.time()
......@@ -260,7 +258,7 @@ def main():
if FLAGS.eval:
# evaluation
results = eval_run(exe, compiled_eval_prog, eval_loader,
results = eval_run(exe, compiled_eval_prog, eval_pyreader,
eval_keys, eval_values, eval_cls)
resolution = None
if 'mask' in results[0]:
......@@ -282,7 +280,7 @@ def main():
logger.info("Best test box ap: {}, in iter: {}".format(
best_box_ap_list[0], best_box_ap_list[1]))
train_loader.reset()
train_pyreader.reset()
if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册