Unverified commit bb144d76, authored by pkpk, committed by GitHub

Add model check for DMTK (#2700)

* test=develop

* test=develop

* test=develop
Parent 3cd5b2c9
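The commit wires a CUDA availability guard into each DMTK entry point: every script touched below appends '../../models/' to sys.path, imports check_cuda from model_check, and calls check_cuda(args.use_cuda) right after argument parsing, so a CPU-only PaddlePaddle install fails fast with a readable message instead of crashing inside the executor. A minimal sketch of such a guard, assuming the helper follows the usual pattern of testing fluid.is_compiled_with_cuda() (the actual models/model_check.py may differ in wording and signature):

# Illustrative sketch only; the real helper lives in models/model_check.py.
import sys

import paddle.fluid as fluid


def check_cuda(use_cuda):
    """Exit with a hint when use_cuda is requested but this PaddlePaddle
    build was compiled without CUDA support."""
    if use_cuda and not fluid.is_compiled_with_cuda():
        print("use_cuda is True, but this paddlepaddle build is CPU-only. "
              "Install paddlepaddle-gpu or rerun with use_cuda=False.")
        sys.exit(1)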
@@ -20,9 +20,12 @@ except ImportError as e:
     import pickle #python 3
 sys.path.append('../../models/dialogue_model_toolkit/auto_dialogue_evaluation/')
+sys.path.append('../../models/')
 from net import Network
 import config
+from model_check import check_cuda
 def train(args):
     """Train
@@ -73,7 +76,8 @@ def train(args):
         print("device count %d" % dev_count)
         print("theoretical memory usage: ")
-        print(fluid.contrib.memory_usage(
+        print(
+            fluid.contrib.memory_usage(
                 program=train_program, batch_size=args.batch_size))
     exe = fluid.Executor(place)
@@ -155,8 +159,8 @@ def train(args):
                 main_program=train_program)
             print("Save model at step %d ... " % step)
-            print(time.strftime('%Y-%m-%d %H:%M:%S',
-                  time.localtime(time.time())))
+            print(
+                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
             best_recall = recall_dict['1_in_10']
     return best_recall
@@ -252,7 +256,8 @@ def finetune(args):
         print("device count %d" % dev_count)
         print("theoretical memory usage: ")
-        print(fluid.contrib.memory_usage(
+        print(
+            fluid.contrib.memory_usage(
                 program=train_program, batch_size=args.batch_size))
     exe = fluid.Executor(place)
@@ -321,8 +326,8 @@ def finetune(args):
                 exe,
                 main_program=train_program)
             print("Save model at step %d ... " % step)
-            print(time.strftime('%Y-%m-%d %H:%M:%S',
-                  time.localtime(time.time())))
+            print(
+                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
             best_cor = cor
     return best_cor
@@ -466,6 +471,8 @@ def main():
     args = config.parse_args()
     config.print_arguments(args)
+    check_cuda(args.use_cuda)
     if args.do_train == True:
         if args.loss_type == 'CLS':
             train(args)
...
@@ -5,12 +5,13 @@ Evaluation
 import sys
 import six
 import numpy as np
+from sklearn.metrics import average_precision_score
 def evaluate_ubuntu(file_path):
     """
     Evaluate on ubuntu data
     """
     def get_p_at_n_in_m(data, n, m, ind):
         """
         Recall n at m
...@@ -56,7 +57,8 @@ def evaluate_ubuntu(file_path): ...@@ -56,7 +57,8 @@ def evaluate_ubuntu(file_path):
"1_in_2": p_at_1_in_2 / length, "1_in_2": p_at_1_in_2 / length,
"1_in_10": p_at_1_in_10 / length, "1_in_10": p_at_1_in_10 / length,
"2_in_10": p_at_2_in_10 / length, "2_in_10": p_at_2_in_10 / length,
"5_in_10": p_at_5_in_10 / length} "5_in_10": p_at_5_in_10 / length
}
return result_dict return result_dict
@@ -65,6 +67,7 @@ def evaluate_douban(file_path):
     """
     Evaluate douban data
     """
+
     def mean_average_precision(sort_data):
         """
         Evaluate mean average precision
@@ -147,7 +150,6 @@ def evaluate_douban(file_path):
         "P_1": 1.0 * sum_p_1 / total_num,
         "1_in_10": 1.0 * sum_r_1 / total_num,
         "2_in_10": 1.0 * sum_r_2 / total_num,
-        "5_in_10": 1.0 * sum_r_5 / total_num}
+        "5_in_10": 1.0 * sum_r_5 / total_num
+    }
     return result_dict
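For context on the result_dict keys above: the "n_in_m" entries are recall-at-n over groups of m ranked response candidates per dialogue context. A small, self-contained illustration with a hypothetical helper and toy data (not code from eval.py; it assumes every block of m consecutive candidates belongs to one context and contains exactly one positive label):

# Hypothetical illustration of the recall n-at-m idea behind keys like "1_in_10".
def recall_n_at_m(data, n, m):
    """data: list of (score, label) pairs, m consecutive pairs per context."""
    hits, contexts = 0, 0
    for start in range(0, len(data), m):
        # rank this context's candidates by score, highest first
        group = sorted(data[start:start + m], key=lambda x: x[0], reverse=True)
        if any(label == 1 for _, label in group[:n]):
            hits += 1
        contexts += 1
    return float(hits) / contexts if contexts else 0.0

# Two contexts with 10 candidates each; the positive ranks 1st and 3rd.
scores = [0.9] + [0.1] * 9 + [0.4, 0.8, 0.6] + [0.1] * 7
labels = ([1] + [0] * 9) * 2
data = list(zip(scores, labels))
print(recall_n_at_m(data, 1, 10))  # 0.5 -> a "1_in_10" style recall
print(recall_n_at_m(data, 5, 10))  # 1.0 -> a "5_in_10" style recall

The repository's get_p_at_n_in_m computes the same idea from its own data layout; the sketch is only meant to make keys such as "1_in_10" concrete.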
@@ -20,9 +20,12 @@ except ImportError as e:
     import pickle #python 3
 sys.path.append('../../models/dialogue_model_toolkit/deep_attention_matching/')
+sys.path.append('../../models/')
+from model_check import check_cuda
 from net import Net
 def evaluate(score_path, result_file_path):
     """
     Evaluate both douban and ubuntu dataset
@@ -70,6 +73,7 @@ def test_with_pyreader(exe, program, pyreader, fetch_list, score_path, batches,
     """
     Test with pyreader
     """
+
     def data_provider():
         """
         Data reader
@@ -145,10 +149,12 @@ def train(args):
             staircase=True))
     optimizer.minimize(loss)
     print("begin memory optimization ...")
-    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
+    print(
+        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
     fluid.memory_optimize(train_program)
     print("end memory optimization ...")
-    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
+    print(
+        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
     test_program = fluid.Program()
     test_startup = fluid.Program()
@@ -270,6 +276,7 @@ def train(args):
         """
         Train on one epoch with pyreader
         """
+
         def data_provider():
             """
             Data reader
@@ -467,6 +474,9 @@ def get_cards():
 if __name__ == '__main__':
     args = config.parse_args()
     config.print_arguments(args)
+    check_cuda(args.use_cuda)
     if args.do_train:
         train(args)
...
@@ -34,12 +34,16 @@ import define_predict_pack
 import reader.data_reader as reader
 _WORK_DIR = os.path.split(os.path.realpath(__file__))[0]
-sys.path.append('../../models/dialogue_model_toolkit/dialogue_general_understanding')
+sys.path.append(
+    '../../models/dialogue_model_toolkit/dialogue_general_understanding')
+sys.path.append('../../models/')
 from bert import BertConfig, BertModel
 from create_model import create_model
 import define_paradigm
+from model_check import check_cuda
 def main(args):
     """main function"""
@@ -117,10 +121,7 @@ def main(args):
         use_cuda=args.use_cuda, main_program=predict_prog)
     test_data_generator = processor.data_generator(
-        batch_size=args.batch_size,
-        phase='test',
-        epoch=1,
-        shuffle=False)
+        batch_size=args.batch_size, phase='test', epoch=1, shuffle=False)
     predict_pyreader.decorate_tensor_provider(test_data_generator)
     predict_pyreader.start()
@@ -161,4 +162,7 @@ def main(args):
 if __name__ == '__main__':
     args = parser.parse_args()
     print_arguments(args)
+    check_cuda(args.use_cuda)
     main(args)
@@ -33,7 +33,11 @@ from utils.args import print_arguments
 from utils.init import init_checkpoint, init_pretraining_params
 _WORK_DIR = os.path.split(os.path.realpath(__file__))[0]
-sys.path.append('../../models/dialogue_model_toolkit/dialogue_general_understanding')
+sys.path.append(
+    '../../models/dialogue_model_toolkit/dialogue_general_understanding')
+sys.path.append('../../models/')
+from model_check import check_cuda
 from bert import BertConfig, BertModel
 from create_model import create_model
@@ -48,7 +52,8 @@ def evaluate(test_exe, test_program, test_pyreader, fetch_list, eval_phase):
     while True:
         try:
             if len(fetch_list) > 2:
-                np_loss, np_acc, np_num_seqs = test_exe.run(fetch_list=fetch_list)
+                np_loss, np_acc, np_num_seqs = test_exe.run(
+                    fetch_list=fetch_list)
                 total_acc.extend(np_acc * np_num_seqs)
             else:
                 np_loss, np_num_seqs = test_exe.run(fetch_list=fetch_list)
@@ -58,15 +63,17 @@ def evaluate(test_exe, test_program, test_pyreader, fetch_list, eval_phase):
             test_pyreader.reset()
             break
     time_end = time.time()
-    current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    current_time = time.strftime('%Y-%m-%d %H:%M:%S',
+                                 time.localtime(time.time()))
     if len(fetch_list) > 2:
-        print("[%s evaluation] %s ave loss: %f, ave acc: %f, elapsed time: %f s" %
-              (eval_phase, current_time, np.sum(total_cost) / np.sum(total_num_seqs),
-               np.sum(total_acc) / np.sum(total_num_seqs), time_end - time_begin))
+        print("[%s evaluation] %s ave loss: %f, ave acc: %f, elapsed time: %f s"
+              % (eval_phase, current_time, np.sum(total_cost) /
+                 np.sum(total_num_seqs), np.sum(total_acc) /
+                 np.sum(total_num_seqs), time_end - time_begin))
     else:
         print("[%s evaluation] %s ave loss: %f, elapsed time: %f s" %
-              (eval_phase, current_time, np.sum(total_cost) / np.sum(total_num_seqs),
-               time_end - time_begin))
+              (eval_phase, current_time, np.sum(total_cost) /
+               np.sum(total_num_seqs), time_end - time_begin))
 def main(args):
@@ -169,12 +176,13 @@ def main(args):
                 loss_scaling=args.loss_scaling)
         if accuracy is not None:
-            skip_opt_set = [loss.name, probs.name, accuracy.name, num_seqs.name]
+            skip_opt_set = [
+                loss.name, probs.name, accuracy.name, num_seqs.name
+            ]
         else:
             skip_opt_set = [loss.name, probs.name, num_seqs.name]
         fluid.memory_optimize(
-            input_program=train_program,
-            skip_opt_set=skip_opt_set)
+            input_program=train_program, skip_opt_set=skip_opt_set)
     if args.verbose:
         if in_tokens[task_name]:
@@ -266,7 +274,9 @@ def main(args):
             if steps % args.skip_steps == 0:
                 if warmup_steps <= 0:
                     if accuracy is not None:
-                        fetch_list = [loss.name, accuracy.name, num_seqs.name]
+                        fetch_list = [
+                            loss.name, accuracy.name, num_seqs.name
+                        ]
                     else:
                         fetch_list = [loss.name, num_seqs.name]
                 else:
@@ -276,7 +286,9 @@ def main(args):
                             num_seqs.name
                         ]
                     else:
-                        fetch_list = [loss.name, scheduled_lr.name, num_seqs.name]
+                        fetch_list = [
+                            loss.name, scheduled_lr.name, num_seqs.name
+                        ]
             else:
                 fetch_list = []
             if accuracy is not None:
@@ -304,36 +316,51 @@ def main(args):
                     total_acc.extend(np_acc * np_num_seqs)
                 if args.verbose:
-                    verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size()
+                    verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
+                    )
                     verbose += "learning rate: %f" % (
                         np_lr[0]
                         if warmup_steps > 0 else args.learning_rate)
                     print(verbose)
-                current_example, current_epoch = processor.get_train_progress()
+                current_example, current_epoch = processor.get_train_progress(
+                )
                 time_end = time.time()
                 used_time = time_end - time_begin
-                current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+                current_time = time.strftime('%Y-%m-%d %H:%M:%S',
+                                             time.localtime(time.time()))
                 if accuracy is not None:
-                    print("%s epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
+                    print(
+                        "%s epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                         "ave acc: %f, speed: %f steps/s" %
-                        (current_time, current_epoch, current_example, num_train_examples,
-                         steps, np.sum(total_cost) / np.sum(total_num_seqs),
+                        (current_time, current_epoch, current_example,
+                         num_train_examples, steps,
+                         np.sum(total_cost) / np.sum(total_num_seqs),
                          np.sum(total_acc) / np.sum(total_num_seqs),
                          args.skip_steps / used_time))
-                    ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), np.sum(total_acc) / np.sum(total_num_seqs), args.skip_steps / used_time])
+                    ce_info.append([
+                        np.sum(total_cost) / np.sum(total_num_seqs),
+                        np.sum(total_acc) / np.sum(total_num_seqs),
+                        args.skip_steps / used_time
+                    ])
                 else:
-                    print("%s epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
+                    print(
+                        "%s epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                         "speed: %f steps/s" %
-                        (current_time, current_epoch, current_example, num_train_examples,
-                         steps, np.sum(total_cost) / np.sum(total_num_seqs),
+                        (current_time, current_epoch, current_example,
+                         num_train_examples, steps,
+                         np.sum(total_cost) / np.sum(total_num_seqs),
                          args.skip_steps / used_time))
-                    ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), args.skip_steps / used_time])
+                    ce_info.append([
+                        np.sum(total_cost) / np.sum(total_num_seqs),
+                        args.skip_steps / used_time
+                    ])
                 total_cost, total_acc, total_num_seqs = [], [], []
                 time_begin = time.time()
             if steps % args.save_steps == 0:
-                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
+                save_path = os.path.join(args.checkpoints,
+                                         "step_" + str(steps))
                 fluid.io.save_persistables(exe, save_path, train_program)
             if steps % args.validation_steps == 0:
                 #evaluate dev set
@@ -344,7 +371,8 @@ def main(args):
                             phase='dev',
                             epoch=1,
                             shuffle=False))
-                    evaluate(test_exe, test_prog, test_pyreader, fetch_test_list, "dev")
+                    evaluate(test_exe, test_prog, test_pyreader,
+                             fetch_test_list, "dev")
                 #evaluate test set
                 if args.do_test:
                     test_pyreader.decorate_tensor_provider(
@@ -353,7 +381,8 @@ def main(args):
                             phase='test',
                             epoch=1,
                             shuffle=False))
-                    evaluate(test_exe, test_prog, test_pyreader, fetch_test_list, "test")
+                    evaluate(test_exe, test_prog, test_pyreader,
+                             fetch_test_list, "test")
     except fluid.core.EOFException:
         save_path = os.path.join(args.checkpoints, "step_" + str(steps))
         fluid.io.save_persistables(exe, save_path, train_program)
@@ -373,10 +402,8 @@ def main(args):
             print("ce info error")
         print("kpis\teach_step_duration_%s_card%s\t%s" %
               (task_name, card_num, ce_time))
-        print("kpis\ttrain_loss_%s_card%s\t%f" %
-              (task_name, card_num, ce_loss))
-        print("kpis\ttrain_acc_%s_card%s\t%f" %
-              (task_name, card_num, ce_acc))
+        print("kpis\ttrain_loss_%s_card%s\t%f" % (task_name, card_num, ce_loss))
+        print("kpis\ttrain_acc_%s_card%s\t%f" % (task_name, card_num, ce_acc))
     #final eval on dev set
     if args.do_val:
@@ -411,4 +438,7 @@ def get_cards():
 if __name__ == '__main__':
     args = parser.parse_args()
     print_arguments(args)
+    check_cuda(args.use_cuda)
     main(args)