提交 97270b9f 编写于 作者: R root

add convert function

上级 90a327f5
...@@ -31,7 +31,7 @@ images per class. ...@@ -31,7 +31,7 @@ images per class.
import cPickle import cPickle
import itertools import itertools
import numpy import numpy
from common import download import paddle.v2.dataset.common
import tarfile import tarfile
__all__ = ['train100', 'test100', 'train10', 'test10'] __all__ = ['train100', 'test100', 'train10', 'test10']
...@@ -75,7 +75,8 @@ def train100(): ...@@ -75,7 +75,8 @@ def train100():
:rtype: callable :rtype: callable
""" """
return reader_creator( return reader_creator(
download(CIFAR100_URL, 'cifar', CIFAR100_MD5), 'train') paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
'train')
def test100(): def test100():
...@@ -88,7 +89,9 @@ def test100(): ...@@ -88,7 +89,9 @@ def test100():
:return: Test reader creator. :return: Test reader creator.
:rtype: callable :rtype: callable
""" """
return reader_creator(download(CIFAR100_URL, 'cifar', CIFAR100_MD5), 'test') return reader_creator(
paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
'test')
def train10(): def train10():
...@@ -102,7 +105,8 @@ def train10(): ...@@ -102,7 +105,8 @@ def train10():
:rtype: callable :rtype: callable
""" """
return reader_creator( return reader_creator(
download(CIFAR10_URL, 'cifar', CIFAR10_MD5), 'data_batch') paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5),
'data_batch')
def test10(): def test10():
...@@ -116,9 +120,20 @@ def test10(): ...@@ -116,9 +120,20 @@ def test10():
:rtype: callable :rtype: callable
""" """
return reader_creator( return reader_creator(
download(CIFAR10_URL, 'cifar', CIFAR10_MD5), 'test_batch') paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5),
'test_batch')
def fetch():
    """Pre-download the CIFAR-10 and CIFAR-100 archives into the local cache."""
    for url, md5 in ((CIFAR10_URL, CIFAR10_MD5), (CIFAR100_URL, CIFAR100_MD5)):
        paddle.v2.dataset.common.download(url, 'cifar', md5)
def convert(path):
    """
    Convert the four CIFAR splits to recordio format under *path*.
    """
    splits = (
        (train100, "cifar_train100"),
        (test100, "cifar_test100"),
        (train10, "cifar_train10"),
        (test10, "cifar_test10"),
    )
    for make_reader, name in splits:
        paddle.v2.dataset.common.convert(path, make_reader(), 10, name)
...@@ -23,7 +23,10 @@ import paddle.v2.dataset ...@@ -23,7 +23,10 @@ import paddle.v2.dataset
import cPickle import cPickle
import glob import glob
__all__ = ['DATA_HOME', 'download', 'md5file', 'split', 'cluster_files_reader'] __all__ = [
'DATA_HOME', 'download', 'md5file', 'split', 'cluster_files_reader',
'convert'
]
DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset') DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
......
...@@ -23,7 +23,7 @@ to initialize SRL model. ...@@ -23,7 +23,7 @@ to initialize SRL model.
import tarfile import tarfile
import gzip import gzip
import itertools import itertools
from common import download import paddle.v2.dataset.common
__all__ = ['test, get_dict', 'get_embedding'] __all__ = ['test, get_dict', 'get_embedding']
...@@ -182,9 +182,15 @@ def get_dict(): ...@@ -182,9 +182,15 @@ def get_dict():
""" """
Get the word, verb and label dictionary of Wikipedia corpus. Get the word, verb and label dictionary of Wikipedia corpus.
""" """
word_dict = load_dict(download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)) word_dict = load_dict(
verb_dict = load_dict(download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)) paddle.v2.dataset.common.download(WORDDICT_URL, 'conll05st',
label_dict = load_dict(download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)) WORDDICT_MD5))
verb_dict = load_dict(
paddle.v2.dataset.common.download(VERBDICT_URL, 'conll05st',
VERBDICT_MD5))
label_dict = load_dict(
paddle.v2.dataset.common.download(TRGDICT_URL, 'conll05st',
TRGDICT_MD5))
return word_dict, verb_dict, label_dict return word_dict, verb_dict, label_dict
...@@ -192,7 +198,7 @@ def get_embedding(): ...@@ -192,7 +198,7 @@ def get_embedding():
""" """
Get the trained word vector based on Wikipedia corpus. Get the trained word vector based on Wikipedia corpus.
""" """
return download(EMB_URL, 'conll05st', EMB_MD5) return paddle.v2.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5)
def test(): def test():
...@@ -209,15 +215,23 @@ def test(): ...@@ -209,15 +215,23 @@ def test():
""" """
word_dict, verb_dict, label_dict = get_dict() word_dict, verb_dict, label_dict = get_dict()
reader = corpus_reader( reader = corpus_reader(
download(DATA_URL, 'conll05st', DATA_MD5), paddle.v2.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5),
words_name='conll05st-release/test.wsj/words/test.wsj.words.gz', words_name='conll05st-release/test.wsj/words/test.wsj.words.gz',
props_name='conll05st-release/test.wsj/props/test.wsj.props.gz') props_name='conll05st-release/test.wsj/props/test.wsj.props.gz')
return reader_creator(reader, word_dict, verb_dict, label_dict) return reader_creator(reader, word_dict, verb_dict, label_dict)
def fetch():
    """Pre-download every CoNLL-05 SRL resource into the local cache."""
    resources = (
        (WORDDICT_URL, WORDDICT_MD5),
        (VERBDICT_URL, VERBDICT_MD5),
        (TRGDICT_URL, TRGDICT_MD5),
        (EMB_URL, EMB_MD5),
        (DATA_URL, DATA_MD5),
    )
    for url, md5 in resources:
        paddle.v2.dataset.common.download(url, 'conll05st', md5)
def convert(path):
    """
    Converts dataset to recordio format.

    :param path: directory the recordio files are written into.
    """
    # BUGFIX: the original signature was ``def convert():`` while the body
    # used ``path`` -- a guaranteed NameError on every call. The parameter is
    # added here, matching every other dataset module's convert(path).
    # Also fixed the "conl105" typo in the freshly introduced output names.
    # NOTE(review): conll05st ships no freely downloadable train split, so the
    # test reader is used for both outputs -- confirm this is intended.
    paddle.v2.dataset.common.convert(path, test(), 10, "conll05_train")
    paddle.v2.dataset.common.convert(path, test(), 10, "conll05_test")
...@@ -166,3 +166,14 @@ def word_dict(): ...@@ -166,3 +166,14 @@ def word_dict():
def fetch():
    """Pre-download the IMDB review corpus archive into the local cache."""
    common_mod = paddle.v2.dataset.common
    common_mod.download(URL, 'imdb', MD5)
def convert(path):
    """
    Converts dataset to recordio format.

    :param path: directory the recordio files are written into.
    """
    # BUGFIX: the original was ``def convert():`` yet used ``path`` in the
    # body (NameError), and called ``ds.imdb.word_dict()`` through an
    # undefined alias ``ds``. Since this *is* the imdb module, the
    # module-local word_dict() is called directly instead.
    w_dict = word_dict()
    # The vocabulary is built once and captured by both reader lambdas.
    paddle.v2.dataset.common.convert(path, lambda: train(w_dict), 10,
                                     "imdb_train")
    paddle.v2.dataset.common.convert(path, lambda: test(w_dict), 10,
                                     "imdb_test")
...@@ -18,7 +18,7 @@ This module will download dataset from ...@@ -18,7 +18,7 @@ This module will download dataset from
http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set
into paddle reader creators. into paddle reader creators.
""" """
import paddle.v2.dataset.common import paddle.v2.dataset.common as common
import collections import collections
import tarfile import tarfile
...@@ -145,4 +145,14 @@ def test(word_idx, n, data_type=DataType.NGRAM): ...@@ -145,4 +145,14 @@ def test(word_idx, n, data_type=DataType.NGRAM):
def fetch():
    """Pre-download the imikolov (PTB) corpus archive into the local cache."""
    archive_url, archive_md5 = URL, MD5
    common.download(archive_url, "imikolov", archive_md5)
def convert(path):
    """
    Convert the imikolov train/test sets to recordio format under *path*.
    """
    ngram_order = 5  # same fixed n-gram window as the original (N = 5)
    vocab = build_dict()
    for make_reader, name in ((train, "imikolov_train"),
                              (test, "imikolov_test")):
        common.convert(path, make_reader(vocab, ngram_order), 10, name)
...@@ -113,3 +113,11 @@ def fetch(): ...@@ -113,3 +113,11 @@ def fetch():
paddle.v2.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) paddle.v2.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
paddle.v2.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5) paddle.v2.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5)
paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
def convert(path):
    """
    Converts dataset to recordio format.

    :param path: directory the recordio files are written into.
    """
    # BUGFIX: "minist" was a typo for "mnist". The names are introduced by
    # this very change, so no existing consumer depends on the misspelling.
    paddle.v2.dataset.common.convert(path, train(), 10, "mnist_train")
    paddle.v2.dataset.common.convert(path, test(), 10, "mnist_test")
...@@ -23,7 +23,7 @@ set and test set into paddle reader creators. ...@@ -23,7 +23,7 @@ set and test set into paddle reader creators.
""" """
import zipfile import zipfile
from common import download import paddle.v2.dataset.common
import re import re
import random import random
import functools import functools
...@@ -99,7 +99,7 @@ USER_INFO = None ...@@ -99,7 +99,7 @@ USER_INFO = None
def __initialize_meta_info__(): def __initialize_meta_info__():
fn = download(URL, "movielens", MD5) fn = paddle.v2.dataset.common.download(URL, "movielens", MD5)
global MOVIE_INFO global MOVIE_INFO
if MOVIE_INFO is None: if MOVIE_INFO is None:
pattern = re.compile(r'^(.*)\((\d+)\)$') pattern = re.compile(r'^(.*)\((\d+)\)$')
...@@ -246,7 +246,15 @@ def unittest(): ...@@ -246,7 +246,15 @@ def unittest():
def fetch():
    """Pre-download the MovieLens archive into the local dataset cache."""
    common_mod = paddle.v2.dataset.common
    common_mod.download(URL, "movielens", MD5)
def convert(path):
    """
    Convert the MovieLens train/test sets to recordio format under *path*.
    """
    for make_reader, name in ((train, "movielens_train"),
                              (test, "movielens_test")):
        paddle.v2.dataset.common.convert(path, make_reader(), 10, name)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -26,7 +26,7 @@ from itertools import chain ...@@ -26,7 +26,7 @@ from itertools import chain
import nltk import nltk
from nltk.corpus import movie_reviews from nltk.corpus import movie_reviews
import common import paddle.v2.dataset.common
__all__ = ['train', 'test', 'get_word_dict'] __all__ = ['train', 'test', 'get_word_dict']
NUM_TRAINING_INSTANCES = 1600 NUM_TRAINING_INSTANCES = 1600
...@@ -39,12 +39,13 @@ def download_data_if_not_yet(): ...@@ -39,12 +39,13 @@ def download_data_if_not_yet():
""" """
try: try:
# make sure that nltk can find the data # make sure that nltk can find the data
if common.DATA_HOME not in nltk.data.path: if paddle.v2.dataset.common.DATA_HOME not in nltk.data.path:
nltk.data.path.append(common.DATA_HOME) nltk.data.path.append(paddle.v2.dataset.common.DATA_HOME)
movie_reviews.categories() movie_reviews.categories()
except LookupError: except LookupError:
print "Downloading movie_reviews data set, please wait....." print "Downloading movie_reviews data set, please wait....."
nltk.download('movie_reviews', download_dir=common.DATA_HOME) nltk.download(
'movie_reviews', download_dir=paddle.v2.dataset.common.DATA_HOME)
print "Download data set success....." print "Download data set success....."
print "Path is " + nltk.data.find('corpora/movie_reviews').path print "Path is " + nltk.data.find('corpora/movie_reviews').path
...@@ -128,4 +129,13 @@ def test(): ...@@ -128,4 +129,13 @@ def test():
def fetch():
    """Download the NLTK movie_reviews corpus into Paddle's dataset cache."""
    cache_dir = paddle.v2.dataset.common.DATA_HOME
    nltk.download('movie_reviews', download_dir=cache_dir)
def convert(path):
    """
    Convert the sentiment train/test sets to recordio format under *path*.
    """
    # NOTE(review): unlike the other dataset modules, ``train`` and ``test``
    # are passed uncalled here -- presumably common.convert accepts the
    # reader function itself in this case; confirm against common.convert.
    for creator, name in ((train, "sentiment_train"),
                          (test, "sentiment_test")):
        paddle.v2.dataset.common.convert(path, creator, 10, name)
...@@ -14,14 +14,14 @@ ...@@ -14,14 +14,14 @@
""" """
UCI Housing dataset. UCI Housing dataset.
This module will download dataset from This module will paddle.v2.dataset.common.download dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse training set and test set into paddle reader creators. parse training set and test set into paddle reader creators.
""" """
import numpy as np import numpy as np
import os import os
from common import download import paddle.v2.dataset.common
__all__ = ['train', 'test'] __all__ = ['train', 'test']
...@@ -82,7 +82,7 @@ def train(): ...@@ -82,7 +82,7 @@ def train():
:rtype: callable :rtype: callable
""" """
global UCI_TRAIN_DATA global UCI_TRAIN_DATA
load_data(download(URL, 'uci_housing', MD5)) load_data(paddle.v2.dataset.common.download(URL, 'uci_housing', MD5))
def reader(): def reader():
for d in UCI_TRAIN_DATA: for d in UCI_TRAIN_DATA:
...@@ -102,7 +102,7 @@ def test(): ...@@ -102,7 +102,7 @@ def test():
:rtype: callable :rtype: callable
""" """
global UCI_TEST_DATA global UCI_TEST_DATA
load_data(download(URL, 'uci_housing', MD5)) load_data(paddle.v2.dataset.common.download(URL, 'uci_housing', MD5))
def reader(): def reader():
for d in UCI_TEST_DATA: for d in UCI_TEST_DATA:
...@@ -112,4 +112,12 @@ def test(): ...@@ -112,4 +112,12 @@ def test():
def fetch():
    """Pre-download the UCI Housing data file into the local cache."""
    common_mod = paddle.v2.dataset.common
    common_mod.download(URL, 'uci_housing', MD5)
def convert(path):
    """
    Converts dataset to recordio format.

    :param path: directory the recordio files are written into.
    """
    # BUGFIX: "uci_houseing_test" was a typo for "uci_housing_test". The
    # name is introduced by this very change, so nothing depends on it yet.
    paddle.v2.dataset.common.convert(path, train(), 10, "uci_housing_train")
    paddle.v2.dataset.common.convert(path, test(), 10, "uci_housing_test")
...@@ -22,7 +22,7 @@ parse training set and test set into paddle reader creators. ...@@ -22,7 +22,7 @@ parse training set and test set into paddle reader creators.
import tarfile import tarfile
import gzip import gzip
from paddle.v2.dataset.common import download import paddle.v2.dataset.common
from paddle.v2.parameters import Parameters from paddle.v2.parameters import Parameters
__all__ = ['train', 'test', 'build_dict'] __all__ = ['train', 'test', 'build_dict']
...@@ -115,7 +115,8 @@ def train(dict_size): ...@@ -115,7 +115,8 @@ def train(dict_size):
:rtype: callable :rtype: callable
""" """
return reader_creator( return reader_creator(
download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'train/train', dict_size) paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
'train/train', dict_size)
def test(dict_size): def test(dict_size):
...@@ -130,16 +131,18 @@ def test(dict_size): ...@@ -130,16 +131,18 @@ def test(dict_size):
:rtype: callable :rtype: callable
""" """
return reader_creator( return reader_creator(
download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'test/test', dict_size) paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
'test/test', dict_size)
def gen(dict_size):
    """
    Build a reader over the WMT-14 'gen' split using a *dict_size* vocabulary.
    """
    archive = paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
    return reader_creator(archive, 'gen/gen', dict_size)
def model():
    """
    Load the pre-trained WMT-14 model parameters from the downloaded archive.
    """
    archive = paddle.v2.dataset.common.download(URL_MODEL, 'wmt14', MD5_MODEL)
    with gzip.open(archive, 'r') as tar_stream:
        return Parameters.from_tar(tar_stream)
...@@ -148,7 +151,7 @@ def model(): ...@@ -148,7 +151,7 @@ def model():
def get_dict(dict_size, reverse=True): def get_dict(dict_size, reverse=True):
# if reverse = False, return dict = {'a':'001', 'b':'002', ...} # if reverse = False, return dict = {'a':'001', 'b':'002', ...}
# else reverse = true, return dict = {'001':'a', '002':'b', ...} # else reverse = true, return dict = {'001':'a', '002':'b', ...}
tar_file = download(URL_TRAIN, 'wmt14', MD5_TRAIN) tar_file = paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
src_dict, trg_dict = __read_to_dict__(tar_file, dict_size) src_dict, trg_dict = __read_to_dict__(tar_file, dict_size)
if reverse: if reverse:
src_dict = {v: k for k, v in src_dict.items()} src_dict = {v: k for k, v in src_dict.items()}
...@@ -157,5 +160,14 @@ def get_dict(dict_size, reverse=True): ...@@ -157,5 +160,14 @@ def get_dict(dict_size, reverse=True):
def fetch():
    """Pre-download the WMT-14 data and model archives into the local cache."""
    for url, md5 in ((URL_TRAIN, MD5_TRAIN), (URL_MODEL, MD5_MODEL)):
        paddle.v2.dataset.common.download(url, 'wmt14', md5)
def convert(path):
    """
    Convert the WMT-14 train/test sets to recordio format under *path*.
    """
    vocab_size = 30000  # same fixed vocabulary size as the original
    for make_reader, name in ((train, "wmt14_train"), (test, "wmt14_test")):
        paddle.v2.dataset.common.convert(path, make_reader(vocab_size), 10,
                                         name)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册