# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import numpy as np

import paddle
from args import parse_args

from seq2seq_attn import Seq2SeqAttnInferModel
from data import create_infer_loader
from paddlenlp.datasets import IWSLT15


def post_process_seq(seq, bos_idx, eos_idx, output_bos=False, output_eos=False):
    """Truncate a decoded sequence at the first EOS and drop special tokens.

    Args:
        seq: Sequence of token ids emitted by the decoder.
        bos_idx: Id of the begin-of-sequence token.
        eos_idx: Id of the end-of-sequence token.
        output_bos: Keep BOS tokens in the output when True.
        output_eos: Keep EOS tokens in the output when True.

    Returns:
        list: Token ids up to and including the first EOS position, with
        BOS/EOS filtered out unless explicitly requested.
    """
    # Fall back to keeping the whole sequence when no EOS token appears.
    cut = next((pos for pos, tok in enumerate(seq) if tok == eos_idx),
               len(seq) - 1)
    kept = []
    for tok in seq[:cut + 1]:
        if tok == bos_idx and not output_bos:
            continue
        if tok == eos_idx and not output_eos:
            continue
        kept.append(tok)
    return kept


def do_predict(args):
    """Run beam-search inference and write decoded sentences to a file.

    Builds a ``Seq2SeqAttnInferModel``, restores weights from
    ``args.init_from_ckpt``, decodes every batch of the IWSLT15 test loader
    and writes the top beam of each instance (one sentence per line) to
    ``args.infer_output_file``.

    Args:
        args: Parsed command-line arguments; must provide ``use_gpu``,
            ``hidden_size``, ``num_layers``, ``dropout``, ``beam_size``,
            ``init_from_ckpt`` and ``infer_output_file``.

    Raises:
        ValueError: If ``args.init_from_ckpt`` is not set.
    """
    paddle.set_device("gpu" if args.use_gpu else "cpu")

    test_loader, src_vocab_size, tgt_vocab_size, bos_id, eos_id = create_infer_loader(
        args)
    _, vocab = IWSLT15.get_vocab()
    trg_idx2word = vocab.idx_to_token

    model = paddle.Model(
        Seq2SeqAttnInferModel(
            src_vocab_size,
            tgt_vocab_size,
            args.hidden_size,
            args.hidden_size,
            args.num_layers,
            args.dropout,
            bos_id=bos_id,
            eos_id=eos_id,
            beam_size=args.beam_size,
            max_out_len=256))

    model.prepare()

    # Load the trained model. Use an explicit raise rather than `assert` so
    # the check survives `python -O`, which strips assert statements.
    if not args.init_from_ckpt:
        raise ValueError("Please set init_from_ckpt to load the infer model.")
    model.load(args.init_from_ckpt)

    with io.open(args.infer_output_file, 'w', encoding='utf-8') as f:
        for data in test_loader():
            with paddle.no_grad():
                finished_seq = model.predict_batch(inputs=data)[0]
            # Normalize the decoder output to rank 3 so the transpose below
            # always yields (batch, beam, length).
            if len(finished_seq.shape) == 2:
                finished_seq = finished_seq[:, :, np.newaxis]
            finished_seq = np.transpose(finished_seq, [0, 2, 1])
            for ins in finished_seq:
                # Only the top-scoring beam is written out.
                top_beam = ins[0]
                id_list = post_process_seq(top_beam, bos_id, eos_id)
                word_list = [trg_idx2word[token_id] for token_id in id_list]
                f.write(" ".join(word_list) + "\n")


if __name__ == "__main__":
    # Script entry point: parse CLI flags and run inference.
    do_predict(parse_args())