# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import requests
import hashlib
import os
import errno
import shutil
import six
import sys
import importlib
import paddle.dataset
import six.moves.cPickle as pickle
import glob

__all__ = [
    'DATA_HOME',
    'download',
    'md5file',
    'split',
    'cluster_files_reader',
    'convert',
]

DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')


# When running unit tests, multiple processes may try to create the
# DATA_HOME directory simultaneously, so we cannot use an if condition
# to check for the existence of the directory; instead, we use the
# filesystem itself as the synchronization mechanism by catching the
# error returned when the directory already exists.
def must_mkdirs(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise


must_mkdirs(DATA_HOME)


def md5file(fname):
    hash_md5 = hashlib.md5()
    # Read in fixed-size chunks so large files do not need to fit in memory.
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
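
# A minimal verification sketch (the digest below is a placeholder, not a
# real checksum for any file):
#
#     expected = "0123456789abcdef0123456789abcdef"
#     if md5file("/path/to/archive.tar.gz") != expected:
#         raise IOError("checksum mismatch, the download may be corrupted")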


def download(url, module_name, md5sum, save_name=None):
    """
    Download a file from url into the DATA_HOME/module_name cache directory,
    verifying its MD5 checksum and retrying up to three times on mismatch.

    :param url: the URL to download from.
    :param module_name: subdirectory of DATA_HOME used as the cache location.
    :param md5sum: the expected MD5 digest of the file.
    :param save_name: an optional file name to save as; defaults to the
                last component of the URL.
    :return: the local path of the downloaded (or already cached) file.
    """
    dirname = os.path.join(DATA_HOME, module_name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    filename = os.path.join(dirname,
                            url.split('/')[-1]
                            if save_name is None else save_name)

    retry = 0
    retry_limit = 3
    while not (os.path.exists(filename) and md5file(filename) == md5sum):
        if os.path.exists(filename):
            print("file md5", md5file(filename), md5sum)
        if retry < retry_limit:
            retry += 1
        else:
            raise RuntimeError("Cannot download {0} within retry limit {1}".
                               format(url, retry_limit))
        print("Cache file %s not found, downloading %s" % (filename, url))
        r = requests.get(url, stream=True)
        total_length = r.headers.get('content-length')

        if total_length is None:
            with open(filename, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        else:
            with open(filename, 'wb') as f:
                dl = 0
                total_length = int(total_length)
                for data in r.iter_content(chunk_size=4096):
                    if six.PY2:
                        data = six.b(data)
                    dl += len(data)
                    f.write(data)
                    done = int(50 * dl / total_length)
                    sys.stdout.write("\r[%s%s]" % ('=' * done,
                                                   ' ' * (50 - done)))
                    sys.stdout.flush()

    return filename
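
# Usage sketch (hypothetical URL and checksum, for illustration only):
# download() blocks until a file with the expected MD5 exists locally, then
# returns its path under DATA_HOME/<module_name>.
#
#     path = download(
#         "http://example.com/datasets/train-images.gz",  # hypothetical URL
#         "mnist",  # cached under DATA_HOME/mnist
#         "0123456789abcdef0123456789abcdef")  # expected md5sum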


def fetch_all():
    """
    Pre-download every dataset by calling the fetch function of each
    paddle.dataset submodule that defines one.
    """
    for module_name in [
            x for x in dir(paddle.dataset) if not x.startswith("__")
    ]:
        if "fetch" in dir(
                importlib.import_module("paddle.dataset.%s" % module_name)):
            getattr(
                importlib.import_module("paddle.dataset.%s" % module_name),
                "fetch")()


def fetch_all_recordio(path):
    """
    Convert every dataset to RecordIO files under path by calling the
    convert function of each paddle.dataset submodule that defines one,
    skipping this common module itself.
    """
    for module_name in [
            x for x in dir(paddle.dataset) if not x.startswith("__")
    ]:
        if "convert" in dir(
                importlib.import_module("paddle.dataset.%s" % module_name)) and \
                not module_name == "common":
            ds_path = os.path.join(path, module_name)
            must_mkdirs(ds_path)
            getattr(
                importlib.import_module("paddle.dataset.%s" % module_name),
                "convert")(ds_path)


def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
    """
    you can call the function as:

139
    split(paddle.dataset.cifar.train10(), line_count=1000,
140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
        suffix="imikolov-train-%05d.pickle")

    the output files as:

    |-imikolov-train-00000.pickle
    |-imikolov-train-00001.pickle
    |- ...
    |-imikolov-train-00480.pickle

    :param reader: is a reader creator
    :param line_count: line count for each file
    :param suffix: the suffix for the output files, should contain "%d"
                means the id for each file. Default is "%05d.pickle"
    :param dumper: is a callable function that dump object to file, this
                function will be called as dumper(obj, f) and obj is the object
                will be dumped, f is a file object. Default is cPickle.dump.
    """
    if not callable(dumper):
        raise TypeError("dumper should be callable.")
    lines = []
    indx_f = 0
    for i, d in enumerate(reader()):
        lines.append(d)
        # Flush a full chunk of exactly line_count instances.
        if (i + 1) % line_count == 0:
            # Pickle files must be written in binary mode.
            with open(suffix % indx_f, "wb") as f:
                dumper(lines, f)
            lines = []
            indx_f += 1
    if lines:
        with open(suffix % indx_f, "wb") as f:
            dumper(lines, f)


def cluster_files_reader(files_pattern,
                         trainer_count,
                         trainer_id,
                         loader=pickle.load):
    """
    Create a reader that yield element from the given files, select
    a file set according trainer count and trainer_id

    :param files_pattern: the files which generating by split(...)
    :param trainer_count: total trainer count
    :param trainer_id: the trainer rank id
    :param loader: is a callable function that load object from file, this
                function will be called as loader(f) and f is a file object.
                Default is cPickle.load
    """

    def reader():
        if not callable(loader):
            raise TypeError("loader should be callable.")
        file_list = glob.glob(files_pattern)
        file_list.sort()
        my_file_list = []
        for idx, fn in enumerate(file_list):
            if idx % trainer_count == trainer_id:
                print("append file: %s" % fn)
                my_file_list.append(fn)
        for fn in my_file_list:
            # Pickle files must be read in binary mode.
            with open(fn, "rb") as f:
                lines = loader(f)
                for line in lines:
                    yield line

    return reader
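
# Usage sketch, assuming shard files were produced by split() with the same
# prefix: each of two trainers reads a disjoint half of the shards.
#
#     reader = cluster_files_reader(
#         "cifar-train-*.pickle",  # pattern matching split() output
#         trainer_count=2,
#         trainer_id=0)  # this trainer reads shards 0, 2, 4, ...
#     for sample in reader():
#         pass  # consume one data instance at a time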


def convert(output_path, reader, line_count, name_prefix):
    """
    Convert data from reader to recordio format files.

    :param output_path: directory in which output files will be saved.
    :param reader: a data reader, from which the convert program will read
                   data instances.
    :param name_prefix: the name prefix of generated files.
    :param line_count: the number of data instances to write into each
                       output file.
    """

    assert line_count >= 1
    indx_f = 0

    def write_data(indx_f, lines):
        filename = "%s/%s-%05d" % (output_path, name_prefix, indx_f)
        writer = recordio.writer(filename)
        for l in lines:
            # FIXME(Yancey1989):
            # dumps with protocol: pickle.HIGHEST_PROTOCOL
            writer.write(pickle.dumps(l))
        writer.close()

    lines = []
    for i, d in enumerate(reader()):
        lines.append(d)
        # Flush a full chunk of exactly line_count instances.
        if (i + 1) % line_count == 0:
            write_data(indx_f, lines)
            lines = []
            indx_f += 1

    # Write any remaining instances, avoiding an empty trailing file.
    if lines:
        write_data(indx_f, lines)
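
# Usage sketch (requires the recordio package, and the output directory must
# already exist): this writes files named <output_path>/<name_prefix>-00000,
# <name_prefix>-00001, ... with at most line_count pickled instances in each
# RecordIO file.
#
#     convert("/tmp/cifar_recordio",
#             paddle.dataset.cifar.train10(),
#             line_count=1000,
#             name_prefix="cifar-train")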