imdb.py 4.4 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Q
qijun 已提交
15
IMDB dataset.
Y
Yu Yang 已提交
16

Q
qijun 已提交
17 18 19 20
This module downloads IMDB dataset from
http://ai.stanford.edu/%7Eamaas/data/sentiment/. This dataset contains a set
of 25,000 highly polar movie reviews for training, and 25,000 for testing.
Besides, this module also provides API for building dictionary.
21
"""
D
dangqingqing 已提交
22

23 24
from __future__ import print_function

25
import paddle.dataset.common
26
import collections
Y
Yi Wang 已提交
27 28 29
import tarfile
import re
import string
M
minqiyang 已提交
30
import six
Y
Yi Wang 已提交
31

Y
Your Name 已提交
32
__all__ = ['build_dict', 'train', 'test', 'convert']
Y
Yi Wang 已提交
33 34 35 36 37 38

URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz'
MD5 = '7c2ac02c03563afcf9b574c7e56c153a'


def tokenize(pattern):
    """
    Read files that match the given pattern.  Tokenize and yield each file.

    :param pattern: compiled regex matched against each tar member name
    :return: generator of token lists, one per matching file; each file is
             lowercased, stripped of trailing newlines and punctuation, and
             split on whitespace (tokens are bytes)
    """
    with tarfile.open(paddle.dataset.common.download(URL, 'imdb', MD5)) as tarf:
        # Note that we should use tarfile.next(), which does
        # sequential access of member files, other than
        # tarfile.extractfile, which does random access and might
        # destroy hard disks.
        tf = tarf.next()
        # Use identity comparison with None (PEP 8) instead of `!= None`.
        while tf is not None:
            # re.match already yields a truthy/falsy result; no bool() needed.
            if pattern.match(tf.name):
                # newline and punctuations removal and ad-hoc tokenization.
                yield tarf.extractfile(tf).read().rstrip(six.b(
                    "\n\r")).translate(
                        None, six.b(string.punctuation)).lower().split()
            tf = tarf.next()
Y
Yi Wang 已提交
56 57 58


def build_dict(pattern, cutoff):
    """
    Build a word dictionary from the corpus. Keys of the dictionary are words,
    and values are zero-based IDs of these words.

    :param pattern: compiled regex selecting corpus files inside the archive
    :param cutoff: words occurring `cutoff` times or fewer are dropped
    :type cutoff: int
    :return: word -> zero-based ID mapping; also contains the '<unk>' key,
             whose ID is the number of retained words
    :rtype: dict
    """
    word_freq = collections.defaultdict(int)
    for doc in tokenize(pattern):
        for word in doc:
            word_freq[word] += 1

    # Not sure if we should prune less-frequent words here.
    kept = [x for x in six.iteritems(word_freq) if x[1] > cutoff]

    # Most frequent words get the smallest IDs; ties broken alphabetically.
    dictionary = sorted(kept, key=lambda x: (-x[1], x[0]))

    # Assign IDs directly instead of zip(*dictionary), which raises
    # ValueError when the corpus is empty or every word was pruned.
    word_idx = {w: i for i, (w, _) in enumerate(dictionary)}
    word_idx['<unk>'] = len(dictionary)
    return word_idx


D
dangqingqing 已提交
78
def reader_creator(pos_pattern, neg_pattern, word_idx):
    """
    Build a sample reader over positive and negative reviews.

    Eagerly converts every document matching `pos_pattern` (label 0) and
    `neg_pattern` (label 1) into a list of word IDs — words missing from
    `word_idx` map to the '<unk>' ID — then returns a generator function
    yielding (id_sequence, label) pairs.
    """
    unk_id = word_idx['<unk>']
    samples = []

    def _collect(pattern, label):
        # Materialize each tokenized document as its ID sequence up front.
        for tokens in tokenize(pattern):
            ids = [word_idx.get(token, unk_id) for token in tokens]
            samples.append((ids, label))

    _collect(pos_pattern, 0)
    _collect(neg_pattern, 1)

    def reader():
        for ids, label in samples:
            yield ids, label

    return reader
Y
Yi Wang 已提交
94 95 96


def train(word_idx):
    """
    IMDB training set creator.

    It returns a reader creator, each sample in the reader is an zero-based ID
    sequence and label in [0, 1].

    :param word_idx: word dictionary
    :type word_idx: dict
    :return: Training reader creator
    :rtype: callable
    """
    # Raw strings: "\." in a normal string is an invalid escape sequence
    # (DeprecationWarning in Python 3); the pattern value is unchanged.
    return reader_creator(
        re.compile(r"aclImdb/train/pos/.*\.txt$"),
        re.compile(r"aclImdb/train/neg/.*\.txt$"), word_idx)
Y
Yi Wang 已提交
111 112 113


def test(word_idx):
    """
    IMDB test set creator.

    It returns a reader creator, each sample in the reader is an zero-based ID
    sequence and label in [0, 1].

    :param word_idx: word dictionary
    :type word_idx: dict
    :return: Test reader creator
    :rtype: callable
    """
    # Raw strings: "\." in a normal string is an invalid escape sequence
    # (DeprecationWarning in Python 3); the pattern value is unchanged.
    return reader_creator(
        re.compile(r"aclImdb/test/pos/.*\.txt$"),
        re.compile(r"aclImdb/test/neg/.*\.txt$"), word_idx)
H
hedaoyuan 已提交
128 129 130


def word_dict():
    """
    Build a word dictionary from the corpus.

    :return: Word dictionary
    :rtype: dict
    """
    # Raw string: "\." in a normal string is an invalid escape sequence
    # (DeprecationWarning in Python 3); the pattern value is unchanged.
    return build_dict(
        re.compile(r"aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150)
Y
Yancey1989 已提交
139 140


141
def fetch():
    """Download and cache the IMDB dataset archive; returns nothing."""
    paddle.dataset.common.download(URL, 'imdb', MD5)
R
root 已提交
143 144


Y
Your Name 已提交
145
def convert(path):
    """
    Converts dataset to recordio format
    """
    word_index = word_dict()
    # Same conversion for both splits; drive it from a small table.
    for creator, name in ((train, "imdb_train"), (test, "imdb_test")):
        # Bind `creator` as a default argument so each lambda keeps its own.
        paddle.dataset.common.convert(
            path, lambda c=creator: c(word_index), 1000, name)