import os
import sys
import time
import six
import numpy as np
import math
import argparse
import paddle.fluid as fluid
import paddle
import utils
import net

SEED = 102


def parse_args():
    parser = argparse.ArgumentParser("gru4rec benchmark.")
    parser.add_argument(
        '--train_dir', type=str, default='train_data', help='train file')
    parser.add_argument(
        '--vocab_path', type=str, default='vocab.txt', help='vocab file')
    parser.add_argument(
        '--is_local', type=int, default=1, help='whether to run locally')
    parser.add_argument('--hid_size', type=int, default=100, help='hidden size')
    parser.add_argument(
        '--model_dir', type=str, default='model_recall20', help='model dir')
    parser.add_argument(
        '--batch_size', type=int, default=5, help='batch size')
    parser.add_argument(
        '--print_batch', type=int, default=10, help='log interval, in batches')
    parser.add_argument(
        '--pass_num', type=int, default=10, help='number of epochs')
    parser.add_argument(
        '--use_cuda', type=int, default=0, help='whether to use GPU')
    parser.add_argument(
        '--parallel', type=int, default=0, help='whether to train in parallel')
    parser.add_argument(
        '--base_lr', type=float, default=0.01, help='learning rate')
    parser.add_argument(
        '--num_devices', type=int, default=1, help='Number of GPU devices')
    args = parser.parse_args()
    return args


def get_cards(args):
    return args.num_devices


def train():
    """ do training """
    args = parse_args()
    hid_size = args.hid_size
    train_dir = args.train_dir
    vocab_path = args.vocab_path
    use_cuda = True if args.use_cuda else False
    parallel = True if args.parallel else False
    print("use_cuda:", use_cuda, "parallel:", parallel)
    batch_size = args.batch_size
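    # prepare_data builds the vocabulary from vocab_path and returns its size
    # together with a batched reader over train_dir; the batch size is scaled
    # by the device count so each device presumably sees batch_size samples.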
    vocab_size, train_reader = utils.prepare_data(
        train_dir, vocab_path, batch_size=batch_size * get_cards(args),
        buffer_size=1000, word_freq_threshold=0, is_train=True)

    # Train program
    src_wordseq, dst_wordseq, avg_cost, acc = net.network(
        vocab_size=vocab_size, hid_size=hid_size)

    # Optimization to minimize loss
    sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=args.base_lr)
    sgd_optimizer.minimize(avg_cost)

    # Initialize executor
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
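    # With --parallel, ParallelExecutor runs the program data-parallel across
    # the visible devices, using loss_name to aggregate gradients; otherwise
    # training falls back to the plain single-place Executor.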
    if parallel:
        train_exe = fluid.ParallelExecutor(
            use_cuda=use_cuda, loss_name=avg_cost.name)
    else:
        train_exe = exe

    pass_num = args.pass_num
    model_dir = args.model_dir
    fetch_list = [avg_cost.name]

    total_time = 0.0
    for pass_idx in six.moves.xrange(pass_num):
        epoch_idx = pass_idx + 1
        print("epoch_%d start" % epoch_idx)

        t0 = time.time()
        i = 0
        newest_ppl = 0
        for data in train_reader():
            i += 1
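            # Each sample is a (source, target) pair of id sequences of varying
            # length; to_lodtensor packs them into LoDTensors so a whole batch
            # of unequal-length sequences can be fed in one run call.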
            lod_src_wordseq = utils.to_lodtensor([dat[0] for dat in data],
                                                 place)
            lod_dst_wordseq = utils.to_lodtensor([dat[1] for dat in data],
                                                 place)
            ret_avg_cost = train_exe.run(
                feed={
                    "src_wordseq": lod_src_wordseq,
                    "dst_wordseq": lod_dst_wordseq
                },
                fetch_list=fetch_list)
            # Perplexity is the exponential of the fetched cross-entropy cost;
            # report the batch mean.
            avg_ppl = np.exp(ret_avg_cost[0])
            newest_ppl = np.mean(avg_ppl)
            if i % args.print_batch == 0:
                print("step:%d ppl:%.3f" % (i, newest_ppl))

        t1 = time.time()
        total_time += t1 - t0
        print("epoch:%d num_steps:%d time_cost(s):%f" %
              (epoch_idx, i, total_time / epoch_idx))
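        # save_inference_model prunes the program to the subgraph needed to
        # compute avg_cost and acc from the two input sequences, then writes it
        # and the trained parameters under save_dir once per epoch.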
        save_dir = "%s/epoch_%d" % (model_dir, epoch_idx)
        feed_var_names = ["src_wordseq", "dst_wordseq"]
        fetch_vars = [avg_cost, acc]
        fluid.io.save_inference_model(save_dir, feed_var_names, fetch_vars, exe)
        print("model saved in %s" % save_dir)
    #exe.close()
    print("finish training")


if __name__ == "__main__":
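    # A minimal example of launching this script (paths and flags mirror the
    # argparse defaults above; adjust them to your own data layout):
    #   python train.py --train_dir train_data --vocab_path vocab.txt \
    #       --use_cuda 0 --batch_size 5 --pass_num 10 --model_dir model_recall20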
    train()