Unverified commit 12d9a640, authored by Li Fuchen, committed by GitHub

Revert "unify reader to dataloader (#3488)" (#3550)

This reverts commit 5108c1c1.
Parent 272a9b52
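
The substance of the revert: the commit being undone had replaced the legacy fluid.layers.py_reader feeding API with fluid.io.DataLoader, and this commit restores py_reader. For orientation, a minimal sketch of the two wiring styles, using only calls that appear in the diff below; the function names, data_gen, and the shape parameters are illustrative, not part of the commit:

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    def feed_vars_with_dataloader(data_gen, batch_size_each, num_steps):
        # Path removed by this revert: declare feed vars, then wrap them
        # in a DataLoader bound to a batch generator.
        x = layers.data(name="x", shape=[batch_size_each, num_steps, 1],
                        dtype='int64', append_batch_size=False)
        y = layers.data(name="y", shape=[batch_size_each * num_steps, 1],
                        dtype='int64', append_batch_size=False)
        loader = fluid.io.DataLoader.from_generator(
            feed_list=[x, y], capacity=16, iterable=False,
            use_double_buffer=True)
        loader.set_batch_generator(data_gen)
        return x, y, loader

    def feed_vars_with_py_reader(data_gen, batch_size_each, num_steps):
        # Path restored by this revert: py_reader creates its own feed
        # vars, which the program obtains via read_file.
        reader = fluid.layers.py_reader(
            capacity=16,
            shapes=[[batch_size_each, num_steps, 1],
                    [batch_size_each * num_steps, 1]],
            dtypes=['int64', 'int64'])
        x, y = fluid.layers.read_file(reader)
        reader.decorate_tensor_provider(data_gen)
        return x, y, reader
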
@@ -60,10 +60,10 @@ def parse_args():
         default=False,
         help='Whether profiling the trainning [True|False]')
     parser.add_argument(
-        '--use_dataloader',
+        '--use_py_reader',
         type=str2bool,
         default=False,
-        help='Whether using dataloader to feed data [True|False]')
+        help='Whether using py_reader to feed data [True|False]')
     parser.add_argument(
         '--log_path',
         help='path of the log file. If not set, logs are printed to console')

@@ -6,7 +6,7 @@ function run_train() {
     python train.py \
         --data_path data/simple-examples/data/ \
         --model_type small \
-        --use_gpu True \
+        --use_gpu True
 }
 
 run_train

@@ -124,10 +124,10 @@ def main():
         init_scale=config.init_scale,
         dropout=config.dropout,
         rnn_model=config.rnn_model,
-        use_dataloader=args.use_dataloader)
+        use_py_reader=args.use_py_reader)
 
-    if args.use_dataloader:
-        dataloader = res_vars[-1]
+    if args.use_py_reader:
+        py_reader = res_vars[-1]
         res_vars = res_vars[:-1]
     loss, last_hidden, last_cell, feed_order = res_vars

@@ -159,7 +159,7 @@ def main():
         init_scale=config.init_scale,
         dropout=config.dropout,
         rnn_model=config.rnn_model,
-        use_dataloader=False)
+        use_py_reader=False)
     # Some op behaves differently for train and inference, we need to call
     # this clone function to ensure every op is right for inference.
     inference_program = inference_program.clone(for_test=True)

@@ -176,6 +176,8 @@ def main():
     exec_strategy.num_iteration_per_drop_scope = 100
 
     build_strategy = fluid.BuildStrategy()
+    build_strategy.enable_inplace = True
+    build_strategy.memory_optimize = False
     build_strategy.fuse_all_optimizer_ops = True
 
     if args.parallel:

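For context, these strategy objects are typically handed to a compiled program; a minimal sketch of how they would be consumed (main_program and loss are assumed names, not part of this diff):

    import paddle.fluid as fluid

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_iteration_per_drop_scope = 100

    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = True          # reuse variable memory in-place
    build_strategy.memory_optimize = False        # keep the memory-optimize pass off
    build_strategy.fuse_all_optimizer_ops = True  # fuse optimizer ops into one kernel

    # assumed usage: compile the training program with both strategies
    compiled_program = fluid.CompiledProgram(main_program).with_data_parallel(
        loss_name=loss.name,
        build_strategy=build_strategy,
        exec_strategy=exec_strategy)
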
@@ -308,7 +310,7 @@ def main():
         ppl = np.exp(total_loss / iters)
         return ppl
 
-    def train_an_epoch_dataloader(epoch_id, batch_times):
+    def train_an_epoch_py_reader(epoch_id, batch_times):
         # get train epoch size
         log_interval = get_log_interval(len(train_data))

@@ -317,7 +319,7 @@ def main():
         total_loss = 0
         iters = 0
 
-        dataloader.start()
+        py_reader.start()
         batch_id = 0
         try:
             while True:

@@ -359,14 +361,14 @@ def main():
                 batch_id += 1
         except fluid.core.EOFException:
-            dataloader.reset()
+            py_reader.reset()
 
         batch_times.append(time.time() - batch_start_time)
         ppl = np.exp(total_loss / iters)
         return ppl
 
     def train():
-        if args.use_dataloader:
+        if args.use_py_reader:
 
             def data_gen():
                 data_iter_size = config.batch_size // device_count

@@ -378,14 +380,14 @@ def main():
                     y = y.reshape((-1, 1))
                     yield x, y
 
-            dataloader.set_batch_generator(data_gen)
+            py_reader.decorate_tensor_provider(data_gen)
 
         total_time = 0.0
         for epoch_id in range(config.max_epoch):
             batch_times = []
             epoch_start_time = time.time()
-            if args.use_dataloader:
-                train_ppl = train_an_epoch_dataloader(epoch_id, batch_times)
+            if args.use_py_reader:
+                train_ppl = train_an_epoch_py_reader(epoch_id, batch_times)
             else:
                 train_ppl = train_an_epoch(epoch_id, batch_times)
             epoch_time = time.time() - epoch_start_time

@@ -32,7 +32,7 @@ def lm_model(hidden_size,
              init_scale=0.1,
              dropout=None,
              rnn_model='static',
-             use_dataloader=False):
+             use_py_reader=False):
     def padding_rnn(input_embedding, len=3, init_hidden=None, init_cell=None):
         weight_1_arr = []
         weight_2_arr = []

@@ -255,23 +255,23 @@ def lm_model(hidden_size,
         return real_res, last_hidden, last_cell
 
     batch_size_each = batch_size // fluid.core.get_cuda_device_count()
-    x = layers.data(
-        name="x",
-        shape=[batch_size_each, num_steps, 1],
-        dtype='int64',
-        append_batch_size=False)
-    y = layers.data(
-        name="y",
-        shape=[batch_size_each * num_steps, 1],
-        dtype='int64',
-        append_batch_size=False)
-
-    if use_dataloader:
-        dataloader = fluid.io.DataLoader.from_generator(
-            feed_list=[x, y],
-            capacity=16,
-            iterable=False,
-            use_double_buffer=True)
+    if use_py_reader:
+        feed_shapes = [[batch_size_each, num_steps, 1],
+                       [batch_size_each * num_steps, 1]]
+        py_reader = fluid.layers.py_reader(
+            capacity=16, shapes=feed_shapes, dtypes=['int64', 'int64'])
+        x, y = fluid.layers.read_file(py_reader)
+    else:
+        x = layers.data(
+            name="x",
+            shape=[batch_size_each, num_steps, 1],
+            dtype='int64',
+            append_batch_size=False)
+        y = layers.data(
+            name="y",
+            shape=[batch_size_each * num_steps, 1],
+            dtype='int64',
+            append_batch_size=False)
 
     init_hidden = layers.data(
         name="init_hidden",

@@ -385,7 +385,7 @@ def lm_model(hidden_size,
     layers.assign(input=last_hidden, output=init_hidden)
 
     feeding_list = ['x', 'y', 'init_hidden', 'init_cell']
-    if use_dataloader:
-        return loss, last_hidden, last_cell, feeding_list, dataloader
+    if use_py_reader:
+        return loss, last_hidden, last_cell, feeding_list, py_reader
     else:
        return loss, last_hidden, last_cell, feeding_list
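
Taken together with the train.py changes above, the restored reader drives each epoch with the standard start / EOFException / reset pattern. A minimal sketch of that loop; exe, program, and the helper name are assumptions, not part of the commit:

    import numpy as np
    import paddle.fluid as fluid

    def run_one_epoch(exe, program, py_reader, loss_var):
        # start() launches the feeding thread that consumes the
        # generator passed to decorate_tensor_provider()
        py_reader.start()
        total_loss, iters = 0.0, 0
        try:
            while True:
                # no feed= argument: x and y are supplied by the reader
                loss_np, = exe.run(program, fetch_list=[loss_var.name])
                total_loss += np.array(loss_np).mean()
                iters += 1
        except fluid.core.EOFException:
            # generator exhausted; reset so the reader can be reused
            py_reader.reset()
        return np.exp(total_loss / iters)  # perplexity, as in train.py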