# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
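
# Fine-tunes BERT or ERNIE on a GLUE task with PaddlePaddle's static graph
# mode. Example invocation (the output path is illustrative):
#
#   python run_glue.py \
#       --task_name sst-2 \
#       --model_type bert \
#       --model_name_or_path bert-base-uncased \
#       --output_dir ./tmp/sst-2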

import argparse
import os
import random
import time
from functools import partial

import numpy as np
import paddle
from paddle.io import DataLoader

from paddle.metric import Accuracy
from paddlenlp.datasets import GlueCoLA, GlueSST2, GlueSTSB, GlueMNLI, GlueQNLI, GlueRTE
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
from paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics import Mcc, PearsonAndSpearman
from paddlenlp.utils.log import logger

TASK_CLASSES = {
    "cola": (GlueCoLA, Mcc),
    "sst-2": (GlueSST2, Accuracy),
    "sts-b": (GlueSTSB, PearsonAndSpearman),
    "mnli": (GlueMNLI, Accuracy),
    "qnli": (GlueQNLI, Accuracy),
    "rte": (GlueRTE, Accuracy),
}

MODEL_CLASSES = {
    "bert": (BertForSequenceClassification, BertTokenizer),
    "ernie": (ErnieForSequenceClassification, ErnieTokenizer),
}


def parse_args():
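    """Parse the command-line arguments for GLUE fine-tuning."""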
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train on, selected from the list: " +
        ", ".join(TASK_CLASSES.keys()), )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected from the list: " +
        ", ".join(MODEL_CLASSES.keys()), )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to a pre-trained model or a shortcut name selected from the list: "
        + ", ".join(
            sum([
                list(classes[-1].pretrained_init_configuration.keys())
                for classes in MODEL_CLASSES.values()
            ], [])), )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated; sequences shorter will be padded.", )
    parser.add_argument(
        "--batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.", )
    parser.add_argument(
        "--learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for Adam.")
    parser.add_argument(
        "--weight_decay",
        default=0.0,
        type=float,
        help="Weight decay if we apply some.")
    parser.add_argument(
        "--adam_epsilon",
        default=1e-8,
        type=float,
        help="Epsilon for Adam optimizer.")
    parser.add_argument(
        "--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs",
        default=3,
        type=int,
        help="Total number of training epochs to perform.", )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set the total number of training steps to perform. Overrides num_train_epochs.",
    )
    parser.add_argument(
        "--warmup_steps",
        default=0,
        type=int,
        help="Linear warmup over warmup_steps.")
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=500,
        help="Log every X update steps.")
    parser.add_argument(
        "--save_steps",
        type=int,
        default=500,
        help="Save a checkpoint every X update steps.")
    parser.add_argument(
        "--seed", type=int, default=42, help="Random seed for initialization")
    parser.add_argument(
        "--select_device",
        type=str,
        default="gpu",
        help="Device to use for training, e.g. gpu or cpu.")
    args = parser.parse_args()
    return args


def create_data_holder(task_name):
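    """Create the static-graph input placeholders for a GLUE task.

    Shapes use -1 so the batch size and sequence length can vary across
    batches. STS-B is a regression task, so its label is a float32 score;
    the other tasks use int64 class labels.
    """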
    input_ids = paddle.static.data(
        name="input_ids", shape=[-1, -1], dtype="int64")
    segment_ids = paddle.static.data(
        name="segment_ids", shape=[-1, -1], dtype="int64")
    if task_name == "sts-b":
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="float32")
    else:
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")

    return [input_ids, segment_ids, label]


def reset_program_state_dict(args, model, state_dict, pretrained_state_dict):
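    """Build the initial parameter state for the program.

    Parameters present in the pre-trained state dict are copied over; any
    parameter missing from it (e.g. the classification head) is drawn from
    a normal distribution scaled by the model's initializer range.
    """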
    reset_state_dict = {}
    scale = model.initializer_range if hasattr(model, "initializer_range")\
        else getattr(model, args.model_type).config["initializer_range"]
    for n, p in state_dict.items():
        if n not in pretrained_state_dict:
            dtype_str = "float32"
            if str(p.dtype) == "VarType.FP64":
                dtype_str = "float64"
            reset_state_dict[p.name] = np.random.normal(
                loc=0.0, scale=scale, size=p.shape).astype(dtype_str)
        else:
            reset_state_dict[p.name] = pretrained_state_dict[n]
    return reset_state_dict


def set_seed(args):
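    """Seed the random, numpy and paddle RNGs for reproducibility."""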
    # Use the same data seed(for data shuffle) for all procs to guarantee data
    # consistency after sharding.
    random.seed(args.seed)
    np.random.seed(args.seed)
    # Maybe different op seeds(for dropout) for different procs is better. By:
    # `paddle.seed(args.seed + paddle.distributed.get_rank())`
    paddle.seed(args.seed)


def evaluate(exe, metric, loss, correct, dev_program, data_loader):
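    """Run the dev program over the data loader and report loss and metric."""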
    metric.reset()
    returns = [loss]
    if isinstance(correct, list) or isinstance(correct, tuple):
        returns.extend(list(correct))
    else:
        returns.append(correct)
    for batch in data_loader:
        return_numpys = exe.run(dev_program, feed=batch, fetch_list=returns)
        metric_numpy = return_numpys[1] if len(
            return_numpys[1:]) == 1 else return_numpys[1:]
        metric.update(metric_numpy)
    accuracy = metric.accumulate()
    print("eval loss: %f, acc: %s" % (return_numpys[0], accuracy))


def convert_example(example,
                    tokenizer,
                    label_list,
                    max_seq_length=512,
                    is_test=False):
    """Convert a GLUE example into the features consumed by the model."""

    def _truncate_seqs(seqs, max_seq_length):
        if len(seqs) == 1:  # single sentence
            # Account for [CLS] and [SEP] with "- 2"
            seqs[0] = seqs[0][0:(max_seq_length - 2)]
        else:  # sentence pair
            # Account for [CLS], [SEP], [SEP] with "- 3"
            tokens_a, tokens_b = seqs
            max_seq_length -= 3
            while True:  # truncate with longest_first strategy
                total_length = len(tokens_a) + len(tokens_b)
                if total_length <= max_seq_length:
                    break
                if len(tokens_a) > len(tokens_b):
                    tokens_a.pop()
                else:
                    tokens_b.pop()
        return seqs

    def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):
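        """Concatenate the sequences with their separators; also build the
        segment ids (one id per sequence) and a mask marking separators."""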
        concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])
        segment_ids = sum(
            ([i] * (len(seq) + len(sep))
             for i, (sep, seq) in enumerate(zip(separators, seqs))), [])
        if isinstance(seq_mask, int):
            seq_mask = [[seq_mask] * len(seq) for seq in seqs]
        if isinstance(separator_mask, int):
            separator_mask = [[separator_mask] * len(sep) for sep in separators]
        p_mask = sum((s_mask + mask
                      for sep, seq, s_mask, mask in zip(
                          separators, seqs, seq_mask, separator_mask)), [])
        return concat, segment_ids, p_mask

    if not is_test:
        # `label_list == None` is for regression tasks
        label_dtype = "int64" if label_list else "float32"
        # get the label
        label = example[-1]
        example = example[:-1]
        # create a label map if it is a classification task
        if label_list:
            label_map = {}
            for (i, l) in enumerate(label_list):
                label_map[l] = i
            label = label_map[label]
        label = [label]
        #label = np.array([label], dtype=label_dtype)
    # tokenize raw text
    tokens_raw = [tokenizer(l) for l in example]
    # truncate to max_seq_length
    tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)
    # concatenate the sequences with special tokens
    tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]
    tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *
                                          len(tokens_trun))
    # convert the token to ids
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    # input_mask = [1] * len(input_ids)
    if not is_test:
        return input_ids, segment_ids, label
    else:
        return input_ids, segment_ids


def do_train(args):
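    """Fine-tune a pre-trained model on the chosen GLUE task."""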
    # Set the paddle execution environment
    paddle.enable_static()
    place = paddle.set_device(args.select_device)
    set_seed(args)

    # Create the main_program for the training and dev_program for the validation
    main_program = paddle.static.default_main_program()
    startup_program = paddle.static.default_startup_program()
    dev_program = paddle.static.Program()

    # Get the configuration of tokenizer and model
    args.task_name = args.task_name.lower()
    args.model_type = args.model_type.lower()
    model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    dataset_class, metric_class = TASK_CLASSES[args.task_name]

    # Create the tokenizer and dataset
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    train_dataset = dataset_class.get_datasets(["train"])

    trans_func = partial(
        convert_example,
        tokenizer=tokenizer,
        label_list=train_dataset.get_labels(),
        max_seq_length=args.max_seq_length)

    train_dataset = train_dataset.apply(trans_func, lazy=True)

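    # Batchify: pad the input ids and segment ids to the longest sequence in
    # the batch, and stack the labels (int64 for classification, float32 for
    # regression).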
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment
        Stack(dtype="int64" if train_dataset.get_labels() else "float32")  # label
    ): [data for i, data in enumerate(fn(samples))]

    train_batch_sampler = paddle.io.BatchSampler(
        train_dataset, batch_size=args.batch_size, shuffle=True)

    # Define the input data and create the train/dev data_loader
    with paddle.static.program_guard(main_program, startup_program):
        [input_ids, segment_ids, labels] = create_data_holder(args.task_name)

    train_data_loader = DataLoader(
        dataset=train_dataset,
        feed_list=[input_ids, segment_ids, labels],
        batch_sampler=train_batch_sampler,
        collate_fn=batchify_fn,
        num_workers=0,
        return_list=False)

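    # MNLI ships two dev sets (matched and mismatched), so build a separate
    # data loader for each; the other tasks have a single dev set.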
    if args.task_name == "mnli":
        dev_dataset_matched, dev_dataset_mismatched = dataset_class.get_datasets(
            ["dev_matched", "dev_mismatched"])
        dev_dataset_matched = dev_dataset_matched.apply(trans_func, lazy=True)
        dev_dataset_mismatched = dev_dataset_mismatched.apply(
            trans_func, lazy=True)
        dev_batch_sampler_matched = paddle.io.BatchSampler(
            dev_dataset_matched, batch_size=args.batch_size, shuffle=False)
        dev_data_loader_matched = DataLoader(
            dataset=dev_dataset_matched,
            batch_sampler=dev_batch_sampler_matched,
            feed_list=[input_ids, segment_ids, labels],
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=False)
        dev_batch_sampler_mismatched = paddle.io.BatchSampler(
            dev_dataset_mismatched, batch_size=args.batch_size, shuffle=False)
        dev_data_loader_mismatched = DataLoader(
            dataset=dev_dataset_mismatched,
            feed_list=[input_ids, segment_ids, labels],
            batch_sampler=dev_batch_sampler_mismatched,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=False)
    else:
        dev_dataset = dataset_class.get_datasets(["dev"])
        dev_dataset = dev_dataset.apply(trans_func, lazy=True)
        dev_batch_sampler = paddle.io.BatchSampler(
            dev_dataset, batch_size=args.batch_size, shuffle=False)
        dev_data_loader = DataLoader(
            dataset=dev_dataset,
            feed_list=[input_ids, segment_ids, labels],
            batch_sampler=dev_batch_sampler,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=False)

    # Create the training-forward program, and clone it for the validation
    with paddle.static.program_guard(main_program, startup_program):
        num_class = 1 if train_dataset.get_labels() is None else len(
            train_dataset.get_labels())
        model, pretrained_state_dict = model_class.from_pretrained(
            args.model_name_or_path, num_classes=num_class)
        loss_fct = paddle.nn.loss.CrossEntropyLoss() \
            if train_dataset.get_labels() else paddle.nn.loss.MSELoss()
        logits = model(input_ids, segment_ids)
        loss = loss_fct(logits, labels)
        dev_program = main_program.clone(for_test=True)

    # Create the training-backward program; this pass will not be
    # executed during validation
    num_training_steps = args.max_steps if args.max_steps > 0 else len(
        train_data_loader) * args.num_train_epochs
    with paddle.static.program_guard(main_program, startup_program):
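        # Linearly warm the learning rate up over `warmup_steps`, then decay
        # it linearly to zero over the remaining training steps.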
        lr_scheduler = LinearDecayWithWarmup(
            args.learning_rate, num_training_steps, args.warmup_steps)
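        # Exclude bias and LayerNorm parameters from weight decay, as is
        # conventional for BERT-style fine-tuning.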
        optimizer = paddle.optimizer.AdamW(
            learning_rate=lr_scheduler,
            epsilon=args.adam_epsilon,
            parameters=model.parameters(),
            weight_decay=args.weight_decay,
            apply_decay_param_fun=lambda x: x in [
                p.name for n, p in model.named_parameters()
                if not any(nd in n for nd in ["bias", "norm"])
            ])
        optimizer.minimize(loss)

    # Create the metric pass for the validation
    with paddle.static.program_guard(dev_program, startup_program):
        metric = metric_class()
        correct = metric.compute(logits, labels)

    # Initialize the fine-tuning parameters: load the weights from the
    # pre-trained model, and initialize any parameter that is not in the
    # pre-trained model from a normal distribution.
Z
Zeyu Chen 已提交
409 410 411
    exe = paddle.static.Executor(place)
    exe.run(startup_program)
    state_dict = model.state_dict()
    reset_state_dict = reset_program_state_dict(args, model, state_dict,
                                                pretrained_state_dict)
    paddle.static.set_program_state(main_program, reset_state_dict)

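    # Training loop: run the forward/backward program, step the LR
    # scheduler, and periodically log progress, evaluate, and checkpoint.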
    global_step = 0
    tic_train = time.time()
    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_data_loader):
            global_step += 1
            loss_return = exe.run(main_program, feed=batch, fetch_list=[loss])
            if global_step % args.logging_steps == 0:
                logger.info(
                    "global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
                    % (global_step, epoch, step, loss_return[0],
                       args.logging_steps / (time.time() - tic_train)))
                tic_train = time.time()
            lr_scheduler.step()
            if global_step % args.save_steps == 0:
                # Validation pass, record the loss and metric
                if args.task_name == "mnli":
                    evaluate(exe, metric, loss, correct, dev_program,
                             dev_data_loader_matched)
                    evaluate(exe, metric, loss, correct, dev_program,
                             dev_data_loader_mismatched)
                else:
                    evaluate(exe, metric, loss, correct, dev_program,
                             dev_data_loader)
                output_dir = os.path.join(args.output_dir,
                                          "model_%d" % global_step)
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                paddle.fluid.io.save_params(exe, output_dir)
                tokenizer.save_pretrained(output_dir)


if __name__ == "__main__":
    args = parse_args()
    do_train(args)