#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
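"""Train the DIN (Deep Interest Network) model with PaddlePaddle.

Reads the dataset config and training file, builds the DIN network defined
in network.py, optimizes it with SGD (piecewise learning-rate decay and
global-norm gradient clipping), and periodically saves inference models
under --model_dir.

Typical usage, assuming the default data layout of this repo:

    python train.py --config_path data/config.txt \
        --train_dir data/paddle_train.txt --use_cuda 1
"""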

import os
import sys
import logging
import time
import argparse
import random

import numpy as np
import paddle
import paddle.fluid as fluid

import network
import reader

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)


def parse_args():
    parser = argparse.ArgumentParser("din")
    parser.add_argument(
        '--config_path',
        type=str,
        default='data/config.txt',
        help='path to the config file')
    parser.add_argument(
        '--train_dir',
        type=str,
        default='data/paddle_train.txt',
        help='path to the training data file')
    parser.add_argument(
        '--model_dir',
        type=str,
        default='din_amazon',
        help='directory for saved models')
    parser.add_argument(
        '--batch_size', type=int, default=16, help='batch size')
    parser.add_argument(
        '--epoch_num', type=int, default=200, help='number of epochs')
    parser.add_argument(
        '--use_cuda', type=int, default=0, help='whether to use GPU')
    parser.add_argument(
        '--parallel',
        type=int,
        default=0,
        help='whether to use parallel executor')
    parser.add_argument(
        '--base_lr', type=float, default=0.85, help='base learning rate')
    parser.add_argument(
        '--num_devices', type=int, default=1, help='number of GPU devices')
    parser.add_argument(
        '--enable_ce',
        action='store_true',
        help='If set, run the task with continuous evaluation logs.')
    parser.add_argument(
        '--batch_num', type=int, help='number of batches to run for CE')
    args = parser.parse_args()
    return args


def train():
    args = parse_args()

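    # Fix random seeds so continuous-evaluation (CE) runs are reproducible.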
    if args.enable_ce:
        SEED = 102
        fluid.default_main_program().random_seed = SEED
        fluid.default_startup_program().random_seed = SEED

    config_path = args.config_path
    train_path = args.train_dir
    epoch_num = args.epoch_num
    use_cuda = bool(args.use_cuda)
    use_parallel = bool(args.parallel)

    logger.info("reading data begins")
    user_count, item_count, cat_count = reader.config_read(config_path)
    data_reader, max_len = reader.prepare_reader(train_path, args.batch_size *
                                                 args.num_devices)
    logger.info("reading data completes")

    avg_cost, pred, feed_list = network.network(item_count, cat_count)

    # Clip gradients by global norm (max norm 5.0) to stabilize training.
    clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)
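    # Piecewise-constant learning-rate schedule: base_lr until step 410000,
    # then 0.2 afterwards.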
    base_lr = args.base_lr
    boundaries = [410000]
    values = [base_lr, 0.2]
    sgd_optimizer = fluid.optimizer.SGD(
        learning_rate=fluid.layers.piecewise_decay(
            boundaries=boundaries, values=values),
        grad_clip=clip)
    sgd_optimizer.minimize(avg_cost)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

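    # Build a DataLoader that feeds batches from the Python reader.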
    loader = fluid.io.DataLoader.from_generator(
        feed_list=feed_list, capacity=10000, iterable=True)
    loader.set_sample_list_generator(data_reader, places=place)
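
    # Optionally run with a ParallelExecutor for multi-device training.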
    if use_parallel:
        train_exe = fluid.ParallelExecutor(
            use_cuda=use_cuda, loss_name=avg_cost.name)
    else:
        train_exe = exe

    logger.info("train begins")

    global_step = 0
    PRINT_STEP = 1000

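    # Per-interval loss and elapsed time; these feed the CE KPIs below.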
    total_time = []
    ce_info = []
    start_time = time.time()
    loss_sum = 0.0
    for epoch_id in range(epoch_num):
        epoch = epoch_id + 1
        for data in loader():
            global_step += 1
            results = train_exe.run(feed=data,
                                    fetch_list=[avg_cost.name, pred.name],
                                    return_numpy=True)
            loss_sum += results[0].mean()

            if global_step % PRINT_STEP == 0:
                ce_info.append(loss_sum / PRINT_STEP)
                total_time.append(time.time() - start_time)
                logger.info(
                    "epoch: %d\tglobal_step: %d\ttrain_loss: %.4f\t\ttime: %.2f"
                    % (epoch, global_step, loss_sum / PRINT_STEP,
                       time.time() - start_time))
                start_time = time.time()
                loss_sum = 0.0

                # Save a model every 50000 steps up to step 400000, then at
                # every PRINT_STEP afterwards.
                if (global_step > 400000 and global_step % PRINT_STEP == 0) or (
                        global_step <= 400000 and global_step % 50000 == 0):
                    save_dir = os.path.join(args.model_dir,
                                            "global_step_" + str(global_step))
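                    # The feed vars and fetch targets below define the saved
                    # inference model's I/O interface.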
                    feed_var_name = [
                        "hist_item_seq", "hist_cat_seq", "target_item",
                        "target_cat", "label", "mask", "target_item_seq",
                        "target_cat_seq"
                    ]
                    fetch_vars = [avg_cost, pred]
                    fluid.io.save_inference_model(save_dir, feed_var_name,
                                                  fetch_vars, exe)
                    logger.info("model saved in " + save_dir)
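            # In CE mode, stop once the requested number of batches has run.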
            if (args.enable_ce and args.batch_num and
                    global_step >= args.batch_num):
                break
    # only for ce
    if args.enable_ce:
        gpu_num = get_cards(args)
        ce_loss = 0
        ce_time = 0
        try:
            ce_loss = ce_info[-1]
            ce_time = total_time[-1]
        except IndexError:
            print("ce info error")
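        # KPI lines in the format consumed by the continuous-evaluation pipeline.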
        print("kpis\teach_pass_duration_card%s\t%s" % (gpu_num, ce_time))
        print("kpis\ttrain_loss_card%s\t%s" % (gpu_num, ce_loss))


def get_cards(args):
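    """Return the device count: parsed from CUDA_VISIBLE_DEVICES in CE mode,
    otherwise taken from --num_devices."""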
    if args.enable_ce:
        cards = os.environ.get('CUDA_VISIBLE_DEVICES')
        num = len(cards.split(","))
        return num
    else:
        return args.num_devices


if __name__ == "__main__":
    train()