# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on dialogue tasks."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import time
import numpy as np

import paddle
import paddle.fluid as fluid

from dgu_net import create_net
import dgu.reader as reader
from dgu.optimization import optimization
import dgu.define_paradigm as define_paradigm 
from dgu.utils.configure import PDConfig
from dgu.utils.input_field import InputField
from dgu.utils.model_check import check_cuda
import dgu.utils.save_load_io as save_load_io


def do_train(args):
    """train function"""
    
    task_name = args.task_name.lower()
    paradigm_inst = define_paradigm.Paradigm(task_name)

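    # map each supported dialogue task to its dataset processor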
    processors = {
        'udc': reader.UDCProcessor,
        'swda': reader.SWDAProcessor,
        'mrda': reader.MRDAProcessor,
        'atis_slot': reader.ATISSlotProcessor,
        'atis_intent': reader.ATISIntentProcessor,
        'dstc2': reader.DSTC2Processor,
    }

    train_prog = fluid.default_main_program()
    startup_prog = fluid.default_startup_program()

    with fluid.program_guard(train_prog, startup_prog): 
        train_prog.random_seed = args.random_seed
        startup_prog.random_seed = args.random_seed
        with fluid.unique_name.guard(): 
            num_labels = len(processors[task_name].get_labels())

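            # model inputs: token ids, position ids, segment ids and the input
            # mask, all padded to max_seq_len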
            src_ids = fluid.data(
                        name='src_ids', shape=[-1, args.max_seq_len], dtype='int64')
            pos_ids = fluid.data(
                        name='pos_ids', shape=[-1, args.max_seq_len], dtype='int64')
            sent_ids = fluid.data(
                        name='sent_ids', shape=[-1, args.max_seq_len], dtype='int64')
            input_mask = fluid.data(
                        name='input_mask', shape=[-1, args.max_seq_len], dtype='float32')
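            # the label shape depends on the task: atis_slot labels every token,
            # dstc2 uses a label vector of length num_labels, and the remaining
            # tasks use a single class id per example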
            if args.task_name == 'atis_slot': 
                labels = fluid.data(
                        name='labels', shape=[-1, args.max_seq_len], dtype='int64')
            elif args.task_name in ['dstc2']:
                labels = fluid.data(
                        name='labels', shape=[-1, num_labels], dtype='int64')
            else: 
                labels = fluid.data(
                        name='labels', shape=[-1, 1], dtype='int64')
            
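            # wrap the inputs for feeding through a PyReader and build the
            # dataset processor for the chosen task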
            input_inst = [src_ids, pos_ids, sent_ids, input_mask, labels]
            input_field = InputField(input_inst)
            data_reader = fluid.io.PyReader(feed_list=input_inst, 
                        capacity=4, iterable=False)
            processor = processors[task_name](data_dir=args.data_dir,
                                              vocab_path=args.vocab_path,
                                              max_seq_len=args.max_seq_len,
                                              do_lower_case=args.do_lower_case,
                                              in_tokens=args.in_tokens,
                                              task_name=task_name,
                                              random_seed=args.random_seed)

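            # build the forward graph for this task's paradigm; it returns the
            # loss, probabilities, optional accuracy and the sequence count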
            results = create_net(
                    is_training=True, 
                    model_input=input_field, 
                    num_labels=num_labels,
                    paradigm_inst=paradigm_inst,
                    args=args)
            
            loss = results.get("loss", None)
            probs = results.get("probs", None)
            accuracy = results.get("accuracy", None)
            num_seqs = results.get("num_seqs", None)

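            # mark fetched variables persistable so they stay available to
            # exe.run across iterations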
            loss.persistable = True
            probs.persistable = True
            if accuracy is not None:
                accuracy.persistable = True
            num_seqs.persistable = True

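            # device count: number of GPUs, or CPU_NUM parallel instances on CPU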
            if args.use_cuda: 
                dev_count = fluid.core.get_cuda_device_count()
            else: 
                dev_count = int(os.environ.get('CPU_NUM', 1))
            
            batch_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='train',
                shuffle=True)
            num_train_examples = processor.get_num_examples(phase='train')
            
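            # when in_tokens is set, batch_size counts tokens, so divide by
            # max_seq_len to approximate the number of sentences per batch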
            if args.in_tokens:
                max_train_steps = args.epoch * num_train_examples // (
                    args.batch_size // args.max_seq_len) // dev_count
            else:
                max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count

            warmup_steps = int(max_train_steps * args.warmup_proportion)
            print("Num train examples: %d" % num_train_examples)
            print("Max train steps: %d" % max_train_steps)
            print("Num warmup steps: %d" % warmup_steps)

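            # build the optimizer with warmup and weight decay; the variable
            # returned here is fetched below as the current learning rate (np_lr)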
            scheduled_lr = optimization(
                loss=loss,
                warmup_steps=warmup_steps,
                num_train_steps=max_train_steps,
                learning_rate=args.learning_rate,
                train_program=train_prog,
                startup_prog=startup_prog,
                weight_decay=args.weight_decay,
                scheduler=args.lr_scheduler,
                use_fp16=False,
                loss_scaling=args.loss_scaling)

    data_reader.decorate_batch_generator(batch_generator)

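    # pick the execution place: the GPU selected by FLAGS_selected_gpus, or CPU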
    if args.use_cuda:
        place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))
    else:
        place = fluid.CPUPlace()
    
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    assert (args.init_from_checkpoint == "") or (
            args.init_from_pretrain_model == "")

    # init from some checkpoint, to resume the previous training
    if args.init_from_checkpoint: 
        save_load_io.init_from_checkpoint(args, exe, train_prog)
    
    # init from some pretrain models, to better solve the current task
    if args.init_from_pretrain_model: 
        save_load_io.init_from_pretrain_model(args, exe, train_prog)

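    # compile the program for data-parallel execution across all visible
    # devices, with in-place memory reuse enabled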
    build_strategy = fluid.compiler.BuildStrategy()
    build_strategy.enable_inplace = True

    compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
                loss_name=loss.name, build_strategy=build_strategy)
    
    # start training
    steps = 0
    time_begin = time.time()
    ce_info = []
    for epoch_step in range(args.epoch): 
        data_reader.start()
        while True:
            try:
                steps += 1
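                # fetch metrics only on logging steps; when warmup is enabled,
                # the scheduled learning rate is fetched as well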
                if steps % args.print_steps == 0:
                    if warmup_steps <= 0:
                        if accuracy is not None:
                            fetch_list = [
                                loss.name, accuracy.name, num_seqs.name
                            ]
                        else:
                            fetch_list = [loss.name, num_seqs.name]
                    else:
                        if accuracy is not None:
                            fetch_list = [
                                loss.name, accuracy.name, scheduled_lr.name,
                                num_seqs.name
                            ]
                        else:
                            fetch_list = [
                                loss.name, scheduled_lr.name, num_seqs.name
                            ]
                else:
                    fetch_list = []

                outputs = exe.run(compiled_train_prog, fetch_list=fetch_list)

                if steps % args.print_steps == 0:
                    if warmup_steps <= 0:
                        if accuracy is not None:
                            np_loss, np_acc, np_num_seqs = outputs
                        else:
                            np_loss, np_num_seqs = outputs
                    else:
                        if accuracy is not None:
                            np_loss, np_acc, np_lr, np_num_seqs = outputs
                        else:
                            np_loss, np_lr, np_num_seqs = outputs

                    time_end = time.time()
                    used_time = time_end - time_begin
                    current_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                                 time.localtime(time.time()))
                    if accuracy is not None: 
                        print(
                            "%s epoch: %d, step: %d, ave loss: %f, "
                            "ave acc: %f, speed: %f steps/s" %
                            (current_time, epoch_step, steps,
                             np.mean(np_loss),
                             np.mean(np_acc),
                             args.print_steps / used_time))
                        ce_info.append([
                            np.mean(np_loss),
                            np.mean(np_acc),
                            args.print_steps / used_time
                        ])
                    else:
                        print(
                            "%s epoch: %d, step: %d, ave loss: %f, "
                            "speed: %f steps/s" %
                            (current_time, epoch_step, steps,
                             np.mean(np_loss),
                             args.print_steps / used_time))
                        ce_info.append([
                            np.mean(np_loss),
                            args.print_steps / used_time
                        ])
                    time_begin = time.time()

                if steps % args.save_steps == 0: 
                    save_path = "step_" + str(steps)
                    if args.save_checkpoint: 
                        save_load_io.save_checkpoint(args, exe, train_prog, save_path)
                    if args.save_param:
                        save_load_io.save_param(args, exe, train_prog, save_path)
                 
            except fluid.core.EOFException:  
                data_reader.reset()
                break
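    # save the final checkpoint/parameters once all epochs have completed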
    if args.save_checkpoint: 
        save_load_io.save_checkpoint(args, exe, train_prog, "step_final")
    if args.save_param:
        save_load_io.save_param(args, exe, train_prog, "step_final")

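    # continuous evaluation (CE) reporting: count the visible GPU cards and
    # emit KPI lines for the last logged loss/accuracy/speed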
    def get_cards():
        num = 0
        cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')
        print("test_cards", cards)
        if cards != '':
            num = len(cards.split(","))
        return num
    
    if args.enable_ce:
        card_num = get_cards()
        print("test_card_num", card_num)
        ce_loss = 0
        ce_acc = 0
        ce_time = 0
        try:
            ce_loss = ce_info[-2][0]
            ce_acc = ce_info[-2][1]
            ce_time = ce_info[-2][2]
        except Exception:
            print("ce info error")
        print("kpis\teach_step_duration_%s_card%s\t%s" %
              (task_name, card_num, ce_time))
        print("kpis\ttrain_loss_%s_card%s\t%f" % (task_name, card_num, ce_loss))
        print("kpis\ttrain_acc_%s_card%s\t%f" % (task_name, card_num, ce_acc))


if __name__ == '__main__': 
    
    args = PDConfig(yaml_file="./data/config/dgu.yaml")
    args.build()
    args.Print()

    check_cuda(args.use_cuda)

    do_train(args)