#!/usr/bin/env python
# -*- coding:utf-8 -*-

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script fetches and preprocesses the movie_reviews data set provided by NLTK.

TODO(yuyang18): Complete dataset.
"""

from __future__ import print_function

import six
import collections
from itertools import chain

import os
import nltk
from nltk.corpus import movie_reviews
import zipfile
from functools import cmp_to_key

import paddle.dataset.common

URL = "https://corpora.bj.bcebos.com/movie_reviews%2Fmovie_reviews.zip"
MD5 = '155de2b77c6834dd8eea7cbe88e93acb'

__all__ = ['train', 'test', 'get_word_dict']
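# the corpus provides 2000 labelled reviews in total; train() serves the
# first 1600 (interleaved neg/pos) and test() serves the remaining 400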
NUM_TRAINING_INSTANCES = 1600
NUM_TOTAL_INSTANCES = 2000


def download_data_if_not_yet():
    """
    Download the data set if it has not been downloaded yet.
    """
    try:
        # download and extract movie_reviews.zip
        paddle.dataset.common.download(
            URL, 'corpora', md5sum=MD5, save_name='movie_reviews.zip')
        path = os.path.join(paddle.dataset.common.DATA_HOME, 'corpora')
        filename = os.path.join(path, 'movie_reviews.zip')
        with zipfile.ZipFile(filename) as zip_file:
            zip_file.extractall(path)
        # make sure that nltk can find the data
        if paddle.dataset.common.DATA_HOME not in nltk.data.path:
            nltk.data.path.append(paddle.dataset.common.DATA_HOME)
        movie_reviews.categories()
    except LookupError:
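        # the corpus is missing locally: fall back to NLTK's own downloader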
        print("Downloading movie_reviews data set, please wait.....")
        nltk.download(
            'movie_reviews', download_dir=paddle.dataset.common.DATA_HOME)
        print("Data set downloaded successfully.")
        print("Path is " + nltk.data.find('corpora/movie_reviews').path)


def get_word_dict():
    """
    Sort the words by how frequently they occur in the samples
    :return:
        words_freq_sorted, a list of (word, rank) pairs, most frequent first
    """
    words_freq_sorted = list()
    word_freq_dict = collections.defaultdict(int)
    download_data_if_not_yet()

    for category in movie_reviews.categories():
        for fileid in movie_reviews.fileids(category):
            for word in movie_reviews.words(fileid):
                word_freq_dict[word] += 1
    words_sort_list = list(six.iteritems(word_freq_dict))
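    # sort by descending count: the comparator ranks more frequent words first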
    words_sort_list.sort(key=cmp_to_key(lambda a, b: b[1] - a[1]))
    for index, word in enumerate(words_sort_list):
        words_freq_sorted.append((word[0], index))
    return words_freq_sorted


def sort_files():
    """
    Interleave the negative and positive sample files so that the two
    classes are read alternately
    :return:
        files_list
    """
    neg_file_list = movie_reviews.fileids('neg')
    pos_file_list = movie_reviews.fileids('pos')
    files_list = list(
        chain.from_iterable(list(zip(neg_file_list, pos_file_list))))
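    # the interleaved list alternates between the classes:
    # [neg_0, pos_0, neg_1, pos_1, ...]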
    return files_list


def load_sentiment_data():
    """
    Load the whole data set as (word_id_list, label) pairs
    :return:
        data_set
    """
    data_set = list()
    download_data_if_not_yet()
    words_ids = dict(get_word_dict())
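    # encode each review as a list of word ids; label 0 = negative, 1 = positive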
    for sample_file in sort_files():
        words_list = list()
        category = 0 if 'neg' in sample_file else 1
        for word in movie_reviews.words(sample_file):
            words_list.append(words_ids[word.lower()])
        data_set.append((words_list, category))
    return data_set


def reader_creator(data):
    """
    Reader creator: generate an iterator over the given data set
    :param data:
        train data set or test data set
    """
    for each in data:
        yield each[0], each[1]


def train():
    """
    Default training set reader creator
    """
    data_set = load_sentiment_data()
    return reader_creator(data_set[0:NUM_TRAINING_INSTANCES])


def test():
    """
    Default test set reader creator
    """
    data_set = load_sentiment_data()
    return reader_creator(data_set[NUM_TRAINING_INSTANCES:])


def fetch():
    nltk.download('movie_reviews', download_dir=paddle.dataset.common.DATA_HOME)
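

# A minimal usage sketch: iterating the train() reader yields
# (word_ids, label) pairs, where label 0 marks a negative review and 1 a
# positive one. Running it downloads the corpus on first use.
if __name__ == '__main__':
    for i, (word_ids, label) in enumerate(train()):
        print(label, word_ids[:10])  # the label and the first ten word ids
        if i >= 2:  # inspect only the first three samples
            break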