# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
imikolov's simple dataset.

This module will download the dataset from
http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse the training set and test
set into paddle reader creators.
"""

from __future__ import print_function

import paddle.dataset.common
import collections
import tarfile
import six

__all__ = ['train', 'test', 'build_dict']

URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz'
MD5 = '30177ea32e27c525793142b6bf2c8e2d'


class DataType(object):
    NGRAM = 1
    SEQ = 2


def word_count(f, word_freq=None):
    if word_freq is None:
        word_freq = collections.defaultdict(int)

    for l in f:
        for w in l.strip().split():
            word_freq[w] += 1
        word_freq['<s>'] += 1
        word_freq['<e>'] += 1

    return word_freq
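
# Illustrative note (an added example, not part of the original module): for a
# file object yielding the single line "a a b", word_count would return counts
# {'a': 2, 'b': 1, '<s>': 1, '<e>': 1} -- one sentence-begin <s> and one
# sentence-end <e> marker is counted per input line.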


def build_dict(min_word_freq=50):
    """
    Build a word dictionary from the corpus. Keys of the dictionary are words,
    and values are zero-based IDs of these words.
    """
    train_filename = './simple-examples/data/ptb.train.txt'
    test_filename = './simple-examples/data/ptb.valid.txt'
    with tarfile.open(
            paddle.dataset.common.download(paddle.dataset.imikolov.URL,
                                           'imikolov',
                                           paddle.dataset.imikolov.MD5)) as tf:
        trainf = tf.extractfile(train_filename)
        testf = tf.extractfile(test_filename)
        word_freq = word_count(testf, word_count(trainf))
        if '<unk>' in word_freq:
            # remove <unk> for now, since we will set it as last index
            del word_freq['<unk>']

        word_freq = [
            x for x in six.iteritems(word_freq) if x[1] > min_word_freq
        ]

        word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
        words, _ = list(zip(*word_freq_sorted))
        word_idx = dict(list(zip(words, six.moves.range(len(words)))))
        word_idx['<unk>'] = len(words)

    return word_idx
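
# Illustrative note (an added example, not part of the original module): the
# returned dictionary maps every word occurring more than min_word_freq times
# to an ID assigned in order of decreasing frequency (ties broken
# alphabetically), and '<unk>' always receives the last index, len(words).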


def reader_creator(filename, word_idx, n, data_type):
    def reader():
        with tarfile.open(
                paddle.dataset.common.download(
                    paddle.dataset.imikolov.URL, 'imikolov',
                    paddle.dataset.imikolov.MD5)) as tf:
            f = tf.extractfile(filename)

            UNK = word_idx['<unk>']
            for l in f:
                if DataType.NGRAM == data_type:
                    assert n > -1, 'Invalid gram length'
                    l = ['<s>'] + l.strip().split() + ['<e>']
                    if len(l) >= n:
                        l = [word_idx.get(w, UNK) for w in l]
                        for i in six.moves.range(n, len(l) + 1):
                            yield tuple(l[i - n:i])
                elif DataType.SEQ == data_type:
                    l = l.strip().split()
                    l = [word_idx.get(w, UNK) for w in l]
                    src_seq = [word_idx['<s>']] + l
                    trg_seq = l + [word_idx['<e>']]
                    if n > 0 and len(src_seq) > n: continue
                    yield src_seq, trg_seq
                else:
                    assert False, 'Unknown data type'

    return reader
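
# Illustrative note (an added example, not part of the original module): with
# data_type=NGRAM and n=5, reader() yields 5-tuples of word IDs taken from a
# sliding window over '<s> w1 ... wk <e>'; with data_type=SEQ it yields
# (src_seq, trg_seq) pairs, where src_seq is the sentence prefixed with '<s>'
# and trg_seq is the same sentence suffixed with '<e>' (when n > 0, pairs with
# len(src_seq) > n are skipped).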


def train(word_idx, n, data_type=DataType.NGRAM):
    """
    imikolov training set creator.

    It returns a reader creator; each sample in the reader is a word ID
    tuple.

    :param word_idx: word dictionary
    :type word_idx: dict
    :param n: sliding window size if type is ngram, otherwise max length of sequence
    :type n: int
    :param data_type: data type (ngram or sequence)
    :type data_type: member variable of DataType (NGRAM or SEQ)
    :return: Training reader creator
    :rtype: callable
    """
    return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n,
                          data_type)


def test(word_idx, n, data_type=DataType.NGRAM):
    """
    imikolov test set creator.

    It returns a reader creator; each sample in the reader is a word ID
    tuple.

    :param word_idx: word dictionary
    :type word_idx: dict
    :param n: sliding window size if type is ngram, otherwise max length of sequence
    :type n: int
    :param data_type: data type (ngram or sequence)
    :type data_type: member variable of DataType (NGRAM or SEQ)
    :return: Test reader creator
    :rtype: callable
    """
    return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n,
                          data_type)


def fetch():
    paddle.dataset.common.download(URL, "imikolov", MD5)
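

# A minimal usage sketch (added example, not part of the original API): build
# the vocabulary, create an n-gram training reader over it, and print a few
# samples. The window size of 5 is an arbitrary illustrative choice; running
# this downloads the corpus via paddle.dataset.common.download.
if __name__ == '__main__':
    word_dict = build_dict()
    ngram_reader = train(word_dict, 5, data_type=DataType.NGRAM)
    for i, sample in enumerate(ngram_reader()):
        print(sample)  # a 5-tuple of word IDs
        if i >= 4:
            break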