# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
imikolov's simple dataset.

This module will download the dataset from
http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse the training and test sets
into paddle reader creators.
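
A minimal usage sketch (illustrative only; assumes the archive can be
downloaded)::

    import paddle.dataset.imikolov as imikolov

    word_idx = imikolov.build_dict()
    train_reader = imikolov.train(word_idx, 5)  # returns a reader creator
    test_reader = imikolov.test(word_idx, 5)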
"""
import paddle.dataset.common
import collections
import tarfile
import six

__all__ = ['train', 'test', 'build_dict', 'convert']

URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz'
MD5 = '30177ea32e27c525793142b6bf2c8e2d'


class DataType(object):
    """Sample layout a reader yields: n-gram tuples (NGRAM) or sequences (SEQ)."""
    NGRAM = 1
    SEQ = 2


def word_count(f, word_freq=None):
    """Count word frequencies in ``f``, tallying <s>/<e> once per line."""
    if word_freq is None:
        word_freq = collections.defaultdict(int)

    for l in f:
        for w in l.strip().split():
            word_freq[w] += 1
        word_freq['<s>'] += 1
        word_freq['<e>'] += 1

    return word_freq


def build_dict(min_word_freq=50):
    """
    Build a word dictionary from the corpus. Keys of the dictionary are words,
    and values are zero-based IDs of these words.
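
    For illustration only (actual IDs depend on the corpus and
    ``min_word_freq``)::

        word_idx = paddle.dataset.imikolov.build_dict()
        word_idx['the']    # a small integer ID for a frequent word
        word_idx['<unk>']  # len(word_idx) - 1; <unk> is always the last index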
    """
    # NOTE: the PTB validation split serves as the "test" set in this module.
    train_filename = './simple-examples/data/ptb.train.txt'
    test_filename = './simple-examples/data/ptb.valid.txt'
    with tarfile.open(
            paddle.dataset.common.download(paddle.dataset.imikolov.URL,
                                           'imikolov',
                                           paddle.dataset.imikolov.MD5)) as tf:
        trainf = tf.extractfile(train_filename)
        testf = tf.extractfile(test_filename)
        word_freq = word_count(testf, word_count(trainf))
        if '<unk>' in word_freq:
            # remove <unk> for now, since we will set it as last index
            del word_freq['<unk>']

        word_freq = [
            x for x in six.iteritems(word_freq) if x[1] > min_word_freq
        ]

        word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
        words, _ = list(zip(*word_freq_sorted))
        word_idx = dict(list(zip(words, six.moves.range(len(words)))))
        word_idx['<unk>'] = len(words)

    return word_idx


def reader_creator(filename, word_idx, n, data_type):
    """Build a reader over ``filename``; yields n-gram tuples or (src, trg) pairs."""
    def reader():
        with tarfile.open(
                paddle.dataset.common.download(
                    paddle.dataset.imikolov.URL, 'imikolov',
                    paddle.dataset.imikolov.MD5)) as tf:
            f = tf.extractfile(filename)

            UNK = word_idx['<unk>']
            for l in f:
                if DataType.NGRAM == data_type:
                    assert n > -1, 'Invalid gram length'
                    l = ['<s>'] + l.strip().split() + ['<e>']
                    if len(l) >= n:
                        l = [word_idx.get(w, UNK) for w in l]
                        for i in six.moves.range(n, len(l) + 1):
                            yield tuple(l[i - n:i])
                elif DataType.SEQ == data_type:
                    l = l.strip().split()
                    l = [word_idx.get(w, UNK) for w in l]
                    src_seq = [word_idx['<s>']] + l
                    trg_seq = l + [word_idx['<e>']]
                    if n > 0 and len(src_seq) > n: continue
                    yield src_seq, trg_seq
                else:
                    assert False, 'Unknown data type'

    return reader


def train(word_idx, n, data_type=DataType.NGRAM):
    """
    imikolov training set creator.

    It returns a reader creator; each sample in the reader is a word ID
    tuple.

    :param word_idx: word dictionary
    :type word_idx: dict
    :param n: sliding window size if data_type is NGRAM, otherwise the maximum sequence length
    :type n: int
    :param data_type: data type (ngram or sequence)
    :type data_type: member variable of DataType (NGRAM or SEQ)
    :return: Training reader creator
    :rtype: callable
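
    A minimal usage sketch (illustrative; assumes the archive can be
    downloaded)::

        word_idx = paddle.dataset.imikolov.build_dict()
        reader = paddle.dataset.imikolov.train(word_idx, n=5)
        for sample in reader():
            # with DataType.NGRAM (the default), each sample is an n-tuple
            # of word IDs, e.g. (3, 17, 0, 52, 8)
            break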
    """
    return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n,
                          data_type)


def test(word_idx, n, data_type=DataType.NGRAM):
    """
    imikolov test set creator.

    It returns a reader creator; each sample in the reader is a word ID
    tuple.

    :param word_idx: word dictionary
    :type word_idx: dict
    :param n: sliding window size if data_type is NGRAM, otherwise the maximum sequence length
    :type n: int
    :param data_type: data type (ngram or sequence)
    :type data_type: member variable of DataType (NGRAM or SEQ)
    :return: Test reader creator
    :rtype: callable
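
    An illustrative sketch of the SEQ variant, where each sample is a
    (src_seq, trg_seq) pair of word-ID lists::

        word_idx = paddle.dataset.imikolov.build_dict()
        reader = paddle.dataset.imikolov.test(
            word_idx, n=-1, data_type=paddle.dataset.imikolov.DataType.SEQ)
        src_seq, trg_seq = next(reader())  # n <= 0 disables the length filter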
    """
    return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n,
                          data_type)


def fetch():
    """Download and cache the dataset archive."""
    paddle.dataset.common.download(URL, "imikolov", MD5)


def convert(path):
    """
    Converts the dataset to recordio format.
    """
    N = 5  # n-gram window size
    word_dict = build_dict()
    paddle.dataset.common.convert(path,
                                  train(word_dict, N), 1000, "imikolov_train")
    paddle.dataset.common.convert(path,
                                  test(word_dict, N), 1000, "imikolov_test")