# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import requests
import hashlib
import os
import errno
import shutil
import sys
import importlib
import paddle.dataset
import paddle.fluid.compat as cpt
import six.moves.cPickle as pickle
import glob

__all__ = [
    'DATA_HOME',
    'download',
    'md5file',
    'split',
    'cluster_files_reader',
    'convert',
]

DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')


# When running unit tests, there could be multiple processes trying to
# create the DATA_HOME directory simultaneously, so we cannot use an if
# condition to check for the existence of the directory; instead, we use
# the filesystem itself as the synchronization mechanism by catching the
# returned errors.
def must_mkdirs(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        # The directory already exists; re-raise any other error.
        if exc.errno != errno.EEXIST:
            raise


must_mkdirs(DATA_HOME)


def md5file(fname):
    """Compute the MD5 checksum of a file, reading it in 4 KB chunks."""
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
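
# Usage sketch for md5file() (the path and checksum below are hypothetical
# placeholders):
#
#     expected = '0123456789abcdef0123456789abcdef'
#     if md5file('/tmp/data.bin') == expected:
#         print('checksum ok')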


def download(url, module_name, md5sum, save_name=None):
    """Download url into DATA_HOME/module_name and verify its MD5 checksum.

    The download is retried up to three times on checksum mismatch.
    Returns the path of the downloaded file.
    """
    dirname = os.path.join(DATA_HOME, module_name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    filename = os.path.join(dirname,
                            url.split('/')[-1]
                            if save_name is None else save_name)

    retry = 0
    retry_limit = 3
    while not (os.path.exists(filename) and md5file(filename) == md5sum):
        if os.path.exists(filename):
            print("file md5", md5file(filename), md5sum)
        if retry < retry_limit:
            retry += 1
        else:
            raise RuntimeError("Cannot download {0} within retry limit {1}".
                               format(url, retry_limit))
        print("Cache file %s not found, downloading %s" % (filename, url))
        r = requests.get(url, stream=True)
        total_length = r.headers.get('content-length')

        if total_length is None:
            # No Content-Length header: stream the raw response straight
            # to disk without a progress bar.
            with open(filename, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        else:
            # Content-Length is known: stream in 4 KB chunks and draw a
            # simple 50-character progress bar.
            with open(filename, 'wb') as f:
                dl = 0
                total_length = int(total_length)
                for data in r.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(cpt.to_bytes(data))
                    done = int(50 * dl / total_length)
                    sys.stdout.write("\r[%s%s]" % ('=' * done,
                                                   ' ' * (50 - done)))
                    sys.stdout.flush()

    return filename
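
# Usage sketch for download() (the URL and checksum below are hypothetical
# placeholders):
#
#     path = download(
#         url='http://example.com/datasets/train.tar.gz',
#         module_name='demo',
#         md5sum='0123456789abcdef0123456789abcdef')
#     print('saved to %s' % path)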


def fetch_all():
    """Call the fetch() entry point of every paddle.dataset submodule
    that defines one."""
    for module_name in [
            x for x in dir(paddle.dataset) if not x.startswith("__")
    ]:
        module = importlib.import_module("paddle.dataset.%s" % module_name)
        if "fetch" in dir(module):
            getattr(module, "fetch")()


def fetch_all_recordio(path):
    """Convert every paddle.dataset submodule except common to RecordIO
    files under path, via each submodule's convert() entry point."""
    for module_name in [
            x for x in dir(paddle.dataset) if not x.startswith("__")
    ]:
        module = importlib.import_module("paddle.dataset.%s" % module_name)
        if "convert" in dir(module) and not module_name == "common":
            ds_path = os.path.join(path, module_name)
            must_mkdirs(ds_path)
            getattr(module, "convert")(ds_path)


def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
    """
    You can call the function as:

    split(paddle.dataset.cifar.train10(), line_count=1000,
        suffix="cifar-train-%05d.pickle")

    and the output files are:

    |-cifar-train-00000.pickle
    |-cifar-train-00001.pickle
    |- ...
    |-cifar-train-00049.pickle

    :param reader: a reader creator
    :param line_count: the number of records in each output file
    :param suffix: the suffix of the output files; it should contain "%d",
                which is replaced by the index of each file. Default is
                "%05d.pickle".
    :param dumper: a callable that dumps an object to a file; it is called
                as dumper(obj, f), where obj is the object to be dumped
                and f is a file object. Default is cPickle.dump.
    """
    if not callable(dumper):
        raise TypeError("dumper should be callable.")
    lines = []
    indx_f = 0
    for i, d in enumerate(reader()):
        lines.append(d)
        # Flush a full batch of line_count records to its own file.
        if (i + 1) % line_count == 0:
            with open(suffix % indx_f, "wb") as f:
                dumper(lines, f)
            lines = []
            indx_f += 1
    if lines:
        with open(suffix % indx_f, "wb") as f:
            dumper(lines, f)
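
# Usage sketch for split(), with a hypothetical in-memory reader:
#
#     def demo_reader():
#         for i in range(2500):
#             yield (i, i * i)
#
#     # Writes demo-00000.pickle, demo-00001.pickle, demo-00002.pickle,
#     # holding 1000, 1000 and 500 records respectively.
#     split(demo_reader, line_count=1000, suffix="demo-%05d.pickle")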


def cluster_files_reader(files_pattern,
                         trainer_count,
                         trainer_id,
                         loader=pickle.load):
    """
    Create a reader that yields elements from the given files, selecting
    a subset of the files according to trainer_count and trainer_id.

    :param files_pattern: a glob pattern matching the files generated by
                split(...)
    :param trainer_count: the total number of trainers
    :param trainer_id: the rank id of this trainer
    :param loader: a callable that loads an object from a file; it is
                called as loader(f), where f is a file object. Default is
                cPickle.load.
    """

    def reader():
        if not callable(loader):
            raise TypeError("loader should be callable.")
        file_list = glob.glob(files_pattern)
        file_list.sort()
        my_file_list = []
        # Shard the sorted file list round-robin across trainers.
        for idx, fn in enumerate(file_list):
            if idx % trainer_count == trainer_id:
                print("append file: %s" % fn)
                my_file_list.append(fn)
        for fn in my_file_list:
            with open(fn, "rb") as f:
                lines = loader(f)
                for line in lines:
                    yield line

    return reader
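
# Usage sketch for cluster_files_reader(), assuming the demo-*.pickle
# files from the split() sketch above exist:
#
#     reader = cluster_files_reader(
#         "demo-*.pickle", trainer_count=4, trainer_id=0)
#     for record in reader():
#         pass  # trainer 0 reads files 0, 4, 8, ... of the sorted list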


def convert(output_path, reader, line_count, name_prefix):
    """
    Convert data from reader to recordio format files.

    :param output_path: directory in which output files will be saved.
    :param reader: a data reader, from which the convert program will read
                   data instances.
    :param line_count: the number of instances in each output file.
    :param name_prefix: the name prefix of generated files.
    """
    import recordio

    assert line_count >= 1
    indx_f = 0

    def write_data(indx_f, lines):
        filename = "%s/%s-%05d" % (output_path, name_prefix, indx_f)
        writer = recordio.writer(filename)
        for l in lines:
            # FIXME(Yancey1989):
            # dumps with protocol: pickle.HIGHEST_PROTOCOL
            writer.write(pickle.dumps(l))
        writer.close()

    lines = []
    for i, d in enumerate(reader()):
        lines.append(d)
        # Flush a full batch of line_count records to its own file.
        if (i + 1) % line_count == 0:
            write_data(indx_f, lines)
            lines = []
            indx_f += 1

    # Write any remaining records that did not fill a full batch.
    if lines:
        write_data(indx_f, lines)
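
# Usage sketch for convert(), assuming the recordio package is installed
# and reusing the hypothetical demo_reader from the split() sketch:
#
#     convert('/tmp/recordio_out', demo_reader, line_count=1000,
#             name_prefix='demo')
#     # Writes /tmp/recordio_out/demo-00000 through demo-00002.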