Unverified commit f04ae975, authored by fengjiayi, committed by GitHub

Merge pull request #12161 from JiayiFeng/make_get_test_program_private

Remove buggy get_test_program and refine reader demo
...
@@ -790,101 +790,3 @@ def get_parameter_value_by_name(name, executor, program=None):
         program = default_main_program()
     var = program.global_block().var(name)
     return get_parameter_value(var, executor)
-def get_test_program(filelist, program=None, startup_program=None):
-    """
-    Transpile the current train program into a program that reads the test
-    dataset, provided the program uses reader ops such as "open_files_op".
-    """
-
-    def _copy_reader_var_(block, var, new_name=None):
-        if new_name is None:
-            new_name = var.name
-        new_var = block.create_var(
-            name=str(new_name), type=core.VarDesc.VarType.READER)
-        new_var.desc.set_shapes(var.desc.shapes())
-        new_var.desc.set_dtypes(var.desc.dtypes())
-        new_var.persistable = True
-        return new_var
-
-    def _get_test_reader_name(train_reader_name):
-        return train_reader_name + "_test"
-
-    def _is_reader_op(op):
-        block = op.block
-        if "Out" in op.output_names:
-            reader_out = block.vars[op.output("Out")[0]]
-            if reader_out.type == core.VarDesc.VarType.READER:
-                return True
-        return False
-
-    if program is None:
-        program = default_main_program()
-    if startup_program is None:
-        startup_program = default_startup_program()
-    startup_block = startup_program.global_block()
-
-    # 1. find the original reader var names
-    startup_reader_op_list = []
-    for op in startup_block.ops:
-        if _is_reader_op(op):
-            startup_reader_op_list.append(op)
-
-    if len(startup_reader_op_list) == 0:
-        return program
-
-    root_reader_op = startup_reader_op_list[0]
-    train_test_reader_map = {}
-    # 2. add operators to the startup program that open and read the
-    #    test data files
-    for op in startup_reader_op_list:
-        assert len(op.output("Out")) == 1
-        train_reader_name = op.output("Out")[0]
-        train_reader = startup_block.vars[train_reader_name]
-        test_reader = _copy_reader_var_(
-            startup_block,
-            train_reader,
-            new_name=_get_test_reader_name(train_reader_name))
-        train_test_reader_map[train_reader.name] = test_reader
-
-        test_op_inputs = {}
-        for name in op.input_names:
-            train_arg_names = op.input(name)
-            test_arg_vars = []
-            for arg_name in train_arg_names:
-                if name == "UnderlyingReader":
-                    arg_var = train_test_reader_map[arg_name]
-                else:
-                    arg_var = startup_block.vars[arg_name]
-                test_arg_vars.append(arg_var)
-            test_op_inputs[name] = test_arg_vars
-
-        test_op = startup_block.append_op(
-            type=op.type,
-            inputs=test_op_inputs,
-            outputs={'Out': [test_reader]},
-            attrs=op.attrs)
-        # point the root reader op's filelist attr at the test files
-        if op.type == root_reader_op.type:
-            test_op.set_attr("file_names", filelist)
-        if op.type == "create_multi_pass_reader":
-            test_op.set_attr("pass_num", 1)
-
-    # 3. rename reader vars in the inference program to a different name
-    #    so it does not read from the train data
-    main_block = program.global_block()
-    for var in main_block.vars.values():
-        if var.type == core.VarDesc.VarType.READER:
-            main_block._rename_var(
-                str(var.name), str(_get_test_reader_name(var.name)))
-
-    for op in main_block.ops:
-        # NOTE: `test_op` here still refers to the last op appended in
-        # step 2, not to `op` -- one reason this helper was buggy.
-        if op.type == root_reader_op.type:
-            test_op.set_attr("file_names", filelist)
-        if op.type == "create_multi_pass_reader":
-            test_op.set_attr("pass_num", 1)
-
-    startup_program._sync_with_cpp()
-    program._sync_with_cpp()
-    return program
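For context, the deleted helper was meant to be called like the following hypothetical sketch (the file list and programs here are illustrative, not taken from the PR):

    import paddle.fluid as fluid

    # After building a train program whose input pipeline uses open_files,
    # derive a program that reads a different (test) file list instead.
    test_program = get_test_program(
        ['test.recordio'],  # hypothetical test file list
        program=fluid.default_main_program(),
        startup_program=fluid.default_startup_program())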
...
@@ -35,7 +35,7 @@ if len(sys.argv) == 1:
     word_dict = paddle.dataset.imdb.word_dict()
 else:
     word_dict = load_vocab(sys.argv[1])
 word_dict["<unk>"] = len(word_dict)

 print "Dict dim = ", len(word_dict)

 # input text data
@@ -50,7 +50,7 @@ feeder = fluid.DataFeeder(feed_list=[data, label], place=fluid.CPUPlace())
 BATCH_SIZE = 128
 train_reader = paddle.batch(
     paddle.reader.shuffle(
-        paddle.dataset.imdb.train(word_dict), buf_size=10000),
+        paddle.dataset.imdb.train(word_dict), buf_size=25000),
     batch_size=BATCH_SIZE)

 test_reader = paddle.batch(
...
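The buf_size change matters because paddle.reader.shuffle only shuffles within a buffer of buf_size samples; the IMDB train set has 25000 examples, so a 25000-sample buffer turns the windowed shuffle into a full shuffle. A minimal sketch of the windowed-shuffle idea (my own illustration of the semantics, not Paddle's implementation):

    import random

    def buffered_shuffle(reader, buf_size):
        # Fill a window of buf_size samples, shuffle it, emit it, repeat.
        def data_reader():
            buf = []
            for sample in reader():
                buf.append(sample)
                if len(buf) >= buf_size:
                    random.shuffle(buf)
                    for s in buf:
                        yield s
                    buf = []
            # Flush the final, possibly partial window.
            random.shuffle(buf)
            for s in buf:
                yield s

        return data_reader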
...
@@ -19,7 +19,7 @@ import sys
 TRAIN_FILES = ['train.recordio']
 TEST_FILES = ['test.recordio']

-DICT_DIM = 89528
+DICT_DIM = 5147

 # embedding dim
 emb_dim = 128
@@ -27,58 +27,46 @@ emb_dim = 128
 # hidden dim
 hid_dim = 128
-# hidden dim2
-hid_dim2 = 96
 # class num
 class_dim = 2
+# epoch num
+epoch_num = 10


-def network_cfg(is_train, pass_num=100):
-    with fluid.unique_name.guard():
-        train_file_obj = fluid.layers.open_files(
-            filenames=TRAIN_FILES,
-            pass_num=pass_num,
-            shapes=[[-1, 1], [-1, 1]],
-            lod_levels=[1, 0],
-            dtypes=['int64', 'int64'])
-
-        test_file_obj = fluid.layers.open_files(
-            filenames=TEST_FILES,
-            pass_num=1,
-            shapes=[[-1, 1], [-1, 1]],
-            lod_levels=[1, 0],
-            dtypes=['int64', 'int64'])
-
-        if is_train:
-            file_obj = fluid.layers.shuffle(train_file_obj, buffer_size=1000)
-        else:
-            file_obj = test_file_obj
-
-        file_obj = fluid.layers.double_buffer(
-            file_obj,
-            name="train_double_buffer" if is_train else 'test_double_buffer')
+def build_program(is_train):
+    file_obj_handle = fluid.layers.io.open_files(
+        filenames=TRAIN_FILES if is_train else TEST_FILES,
+        shapes=[[-1, 1], [-1, 1]],
+        lod_levels=[1, 0],
+        dtypes=['int64', 'int64'])
+    file_obj = fluid.layers.io.double_buffer(file_obj_handle)

+    with fluid.unique_name.guard():
         data, label = fluid.layers.read_file(file_obj)
-
         emb = fluid.layers.embedding(input=data, size=[DICT_DIM, emb_dim])

-        # sequence conv with window size = 3
-        win_size = 3
         conv_3 = fluid.nets.sequence_conv_pool(
             input=emb,
             num_filters=hid_dim,
-            filter_size=win_size,
+            filter_size=3,
             act="tanh",
-            pool_type="max")
-
-        # fc layer after conv
-        fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2)
+            pool_type="sqrt")
+        conv_4 = fluid.nets.sequence_conv_pool(
+            input=emb,
+            num_filters=hid_dim,
+            filter_size=4,
+            act="tanh",
+            pool_type="sqrt")

-        # probability of each class
-        prediction = fluid.layers.fc(input=[fc_1],
+        prediction = fluid.layers.fc(input=[conv_3, conv_4],
                                      size=class_dim,
                                      act="softmax")

         # cross entropy loss
         cost = fluid.layers.cross_entropy(input=prediction, label=label)
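The refactored network is the usual text-CNN shape: two sequence_conv_pool branches with window sizes 3 and 4 whose pooled outputs are concatenated by a single softmax fc layer. The pool_type="sqrt" scaling can be sketched as follows, assuming it denotes the conventional sum-over-time pooling scaled by 1/sqrt(length):

    import numpy

    def sqrt_pool(seq_features):
        # seq_features: (seq_len, hidden_dim) array for one sequence.
        # Sum over time steps, scaled by 1/sqrt(seq_len), so sequences of
        # different lengths yield comparably scaled features.
        return seq_features.sum(axis=0) / numpy.sqrt(len(seq_features))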
@@ -88,58 +76,62 @@ def network_cfg(is_train, pass_num=100):
         if is_train:
             # SGD optimizer
-            sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=0.01)
+            sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=0.001)
             sgd_optimizer.minimize(avg_cost)

-        return {
-            'loss': avg_cost,
-            'log': [avg_cost, acc],
-            'file': train_file_obj if is_train else test_file_obj
-        }
+    return {'loss': avg_cost, 'log': [avg_cost, acc], 'file': file_obj_handle}

 def main():
     train = fluid.Program()
     startup = fluid.Program()
+    test = fluid.Program()

     with fluid.program_guard(train, startup):
-        train_args = network_cfg(is_train=True)
+        train_args = build_program(is_train=True)

-    test = fluid.Program()
-
-    with fluid.program_guard(test, fluid.Program()):
-        test_args = network_cfg(is_train=False)
+    with fluid.program_guard(test, startup):
+        test_args = build_program(is_train=False)

+    use_cuda = fluid.core.is_compiled_with_cuda()
     # startup
-    place = fluid.CUDAPlace(0)
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     exe = fluid.Executor(place=place)
     exe.run(startup)

     train_exe = fluid.ParallelExecutor(
-        use_cuda=True, loss_name=train_args['loss'].name, main_program=train)
+        use_cuda=use_cuda,
+        loss_name=train_args['loss'].name,
+        main_program=train)
+    test_exe = fluid.ParallelExecutor(
+        use_cuda=use_cuda, main_program=test, share_vars_from=train_exe)

     fetch_var_list = [var.name for var in train_args['log']]
-    for i in xrange(sys.maxint):
-        result = map(numpy.array,
-                     train_exe.run(fetch_list=fetch_var_list
-                                   if i % 1000 == 0 else []))
-        if len(result) != 0:
-            print 'Train: ', result
-
-        if i % 1000 == 0:
-            test_exe = fluid.ParallelExecutor(
-                use_cuda=True, main_program=test, share_vars_from=train_exe)
-            loss = []
-            acc = []
-            try:
-                while True:
-                    loss_np, acc_np = map(
-                        numpy.array, test_exe.run(fetch_list=fetch_var_list))
-                    loss.append(loss_np[0])
-                    acc.append(acc_np[0])
-            except:
-                test_args['file'].reset()
-            print 'TEST: ', numpy.mean(loss), numpy.mean(acc)
+    for epoch_id in range(epoch_num):
+        # train
+        try:
+            batch_id = 0
+            while True:
+                loss, acc = map(numpy.array,
+                                train_exe.run(fetch_list=fetch_var_list))
+                print 'Train epoch', epoch_id, 'batch', batch_id, 'loss:', loss, 'acc:', acc
+                batch_id += 1
+        except fluid.core.EOFException:
+            print 'End of epoch', epoch_id
+            train_args['file'].reset()
+
+        # test
+        loss = []
+        acc = []
+        try:
+            while True:
+                loss_np, acc_np = map(numpy.array,
+                                      test_exe.run(fetch_list=fetch_var_list))
+                loss.append(loss_np[0])
+                acc.append(acc_np[0])
+        except:
+            test_args['file'].reset()
+        print 'Test loss:', numpy.mean(loss), 'acc:', numpy.mean(acc)

 if __name__ == '__main__':
...
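The loop's shape -- drain the reader until fluid.core.EOFException, then reset() the file handle before the next pass -- generalizes to any open_files-based pipeline. A condensed sketch with generic names (not part of the demo itself):

    import numpy
    import paddle.fluid as fluid

    def run_one_pass(executor, fetch_list, file_obj_handle):
        # Run until the reader is exhausted, collecting fetched metrics,
        # then reset the reader so the next pass starts from the top.
        results = []
        try:
            while True:
                results.append(
                    [numpy.array(v) for v in executor.run(fetch_list=fetch_list)])
        except fluid.core.EOFException:
            file_obj_handle.reset()
        return results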