# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import requests
import hashlib
import os
import errno
import shutil
import sys
import importlib
import paddle.dataset
import six.moves.cPickle as pickle
import glob

__all__ = [
    'DATA_HOME',
    'download',
    'md5file',
    'split',
    'cluster_files_reader',
    'convert',
]

DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')


# When running unit tests, there could be multiple processes trying to
# create the DATA_HOME directory simultaneously, so we cannot use an if
# condition to check for the existence of the directory; instead, we use
# the filesystem itself as the synchronization mechanism by catching the
# error it returns.
def must_mkdirs(path):
    try:
        # Create the requested path (the original mistakenly always
        # created DATA_HOME, ignoring the argument).
        os.makedirs(path)
    except OSError as exc:
        # EEXIST means another process created it first; that is fine.
        if exc.errno != errno.EEXIST:
            raise


must_mkdirs(DATA_HOME)
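

# Illustrative sketch (not part of the public API): because must_mkdirs
# swallows EEXIST, it is idempotent, so repeated or concurrent calls are safe.
def _must_mkdirs_example():
    must_mkdirs(DATA_HOME)  # creates the directory if it is missing
    must_mkdirs(DATA_HOME)  # a second call is a no-op rather than an error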


def md5file(fname):
    """Compute the md5 hex digest of the file at fname."""
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        # Read in 4 KB chunks so large files need not fit in memory.
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
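

# Illustrative sketch (not part of the public API): verifying a small file's
# digest with md5file. The file name under DATA_HOME is hypothetical.
def _md5file_example():
    fname = os.path.join(DATA_HOME, 'md5file_example.txt')
    with open(fname, 'wb') as f:
        f.write(b'hello')
    # md5("hello") is the well-known digest below.
    assert md5file(fname) == '5d41402abc4b2a76b9719d911017c592'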


def download(url, module_name, md5sum, save_name=None):
    """
    Download a file from url into DATA_HOME/module_name and verify its
    md5 checksum, retrying up to three times on mismatch.

    :param url: the URL to download from.
    :param module_name: subdirectory of DATA_HOME used as the cache location.
    :param md5sum: the expected md5 checksum of the file.
    :param save_name: optional file name to save as; defaults to the last
                component of the URL.
    :return: the local path of the downloaded file.
    """
    dirname = os.path.join(DATA_HOME, module_name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    filename = os.path.join(dirname,
                            url.split('/')[-1]
                            if save_name is None else save_name)

    retry = 0
    retry_limit = 3
    while not (os.path.exists(filename) and md5file(filename) == md5sum):
        if os.path.exists(filename):
            print("md5 mismatch for %s: got %s, expected %s" %
                  (filename, md5file(filename), md5sum))
        if retry < retry_limit:
            retry += 1
        else:
            raise RuntimeError("Cannot download {0} within retry limit {1}".
                               format(url, retry_limit))
        print("Cache file %s not found, downloading %s" % (filename, url))
        r = requests.get(url, stream=True)
        total_length = r.headers.get('content-length')

        if total_length is None:
            with open(filename, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        else:
            with open(filename, 'wb') as f:
                dl = 0
                total_length = int(total_length)
                for data in r.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
                    done = int(50 * dl / total_length)
                    sys.stdout.write("\r[%s%s]" % ('=' * done,
                                                   ' ' * (50 - done)))
                    sys.stdout.flush()

    return filename
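

# Illustrative sketch (not part of the public API): fetching and caching a
# file with download(). The URL and checksum are hypothetical placeholders;
# with a wrong checksum, download() raises RuntimeError after three retries.
def _download_example():
    url = 'http://example.com/dataset/train.tar.gz'  # hypothetical URL
    md5sum = '0123456789abcdef0123456789abcdef'  # placeholder checksum
    path = download(url, 'example_module', md5sum)
    print("cached at %s" % path)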


def fetch_all():
    """
    Import every paddle.dataset submodule and call its fetch() function,
    if it has one, to pre-download all datasets into DATA_HOME.
    """
    for module_name in [
            x for x in dir(paddle.dataset) if not x.startswith("__")
    ]:
        if "fetch" in dir(
                importlib.import_module("paddle.dataset.%s" % module_name)):
            getattr(
                importlib.import_module("paddle.dataset.%s" % module_name),
                "fetch")()


def fetch_all_recordio(path):
    """
    Import every paddle.dataset submodule except common and call its
    convert() function, if it has one, writing recordio files under
    path/<module_name>.
    """
    for module_name in [
            x for x in dir(paddle.dataset) if not x.startswith("__")
    ]:
        if "convert" in dir(
                importlib.import_module("paddle.dataset.%s" % module_name)) and \
                not module_name == "common":
            ds_path = os.path.join(path, module_name)
            must_mkdirs(ds_path)
            getattr(
                importlib.import_module("paddle.dataset.%s" % module_name),
                "convert")(ds_path)


def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
    """
    you can call the function as:

134
    split(paddle.dataset.cifar.train10(), line_count=1000,
135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
        suffix="imikolov-train-%05d.pickle")

    the output files as:

    |-imikolov-train-00000.pickle
    |-imikolov-train-00001.pickle
    |- ...
    |-imikolov-train-00480.pickle

    :param reader: is a reader creator
    :param line_count: line count for each file
    :param suffix: the suffix for the output files, should contain "%d"
                means the id for each file. Default is "%05d.pickle"
    :param dumper: is a callable function that dump object to file, this
                function will be called as dumper(obj, f) and obj is the object
                will be dumped, f is a file object. Default is cPickle.dump.
    """
    if not callable(dumper):
        raise TypeError("dumper should be callable.")
    lines = []
    indx_f = 0
    for i, d in enumerate(reader()):
        lines.append(d)
        if i >= line_count and i % line_count == 0:
            with open(suffix % indx_f, "w") as f:
                dumper(lines, f)
                lines = []
                indx_f += 1
    if lines:
        with open(suffix % indx_f, "w") as f:
            dumper(lines, f)
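

# Illustrative sketch (not part of the public API): splitting a small
# in-memory reader into pickle shards. The /tmp prefix is hypothetical.
def _split_example():
    def reader():
        for i in range(250):
            yield i

    # Writes /tmp/example-00000.pickle, /tmp/example-00001.pickle, ...
    split(reader, line_count=100, suffix="/tmp/example-%05d.pickle")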


def cluster_files_reader(files_pattern,
                         trainer_count,
                         trainer_id,
                         loader=pickle.load):
    """
    Create a reader that yield element from the given files, select
    a file set according trainer count and trainer_id

    :param files_pattern: the files which generating by split(...)
    :param trainer_count: total trainer count
    :param trainer_id: the trainer rank id
    :param loader: is a callable function that load object from file, this
                function will be called as loader(f) and f is a file object.
                Default is cPickle.load
    """

    def reader():
        if not callable(loader):
            raise TypeError("loader should be callable.")
        file_list = glob.glob(files_pattern)
        file_list.sort()
        my_file_list = []
        for idx, fn in enumerate(file_list):
            if idx % trainer_count == trainer_id:
                print("append file: %s" % fn)
                my_file_list.append(fn)
        for fn in my_file_list:
            # Open in binary mode to match the pickle-serialized shards.
            with open(fn, "rb") as f:
                lines = loader(f)
                for line in lines:
                    yield line

    return reader
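

# Illustrative sketch (not part of the public API): reading back the shards
# produced by _split_example with two trainers; trainer 0 reads every other
# file (indices 0, 2, ...) and trainer 1 reads the rest.
def _cluster_files_reader_example():
    reader = cluster_files_reader(
        "/tmp/example-*.pickle", trainer_count=2, trainer_id=0)
    for line in reader():
        print(line)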


def convert(output_path, reader, line_count, name_prefix):
    """
    Convert data from reader to recordio format files.

    :param output_path: directory in which output files will be saved.
    :param reader: a data reader, from which the convert program will read
                   data instances.
    :param line_count: the number of data instances in each output file.
    :param name_prefix: the name prefix of generated files.
    """
    # Import after the docstring so the string above is a real docstring.
    import recordio

    assert line_count >= 1
    indx_f = 0

    def write_data(indx_f, lines):
        filename = "%s/%s-%05d" % (output_path, name_prefix, indx_f)
        writer = recordio.writer(filename)
        for l in lines:
            # FIXME(Yancey1989):
            # dumps with protocol: pickle.HIGHEST_PROTOCOL
            writer.write(pickle.dumps(l))
        writer.close()

    lines = []
    for i, d in enumerate(reader()):
        lines.append(d)
        if i % line_count == 0 and i >= line_count:
            write_data(indx_f, lines)
            lines = []
            indx_f += 1
            continue

    # Only flush the remainder if there is one, so an empty trailing
    # file is not created when the count divides evenly.
    if lines:
        write_data(indx_f, lines)
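

# Illustrative sketch (not part of the public API): converting an in-memory
# reader into recordio shards. Requires the recordio package; the output
# directory is hypothetical.
def _convert_example():
    def reader():
        for i in range(100):
            yield {'id': i, 'label': i % 2}

    out_dir = "/tmp/example_recordio"
    must_mkdirs(out_dir)
    # Writes /tmp/example_recordio/example-00000, example-00001, ...
    convert(out_dir, reader, line_count=50, name_prefix="example")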