#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import os.path as osp
import shutil
import requests
import tqdm
import hashlib
import tarfile
import zipfile

from .voc_utils import create_list

import logging
logger = logging.getLogger(__name__)

__all__ = ['get_weights_path', 'get_dataset_path']

WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/weights")
DATASET_HOME = osp.expanduser("~/.cache/paddle/dataset")

# dict of {dataset_name: (download_info, sub_dirs)}
# download info: (url, md5sum)
DATASETS = {
    'coco': ([
        (
            'http://images.cocodataset.org/zips/train2017.zip',
            'cced6f7f71b7629ddf16f17bbcfab6b2', ),
        (
            'http://images.cocodataset.org/zips/val2017.zip',
            '442b8da7639aecaf257c1dceb8ba8c80', ),
        (
            'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
            'f4bbac642086de4f52a3fdda2de5fa2c', ),
    ], ["annotations", "train2017", "val2017"]),
    'voc': ([
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
            '6cd6e144f989b92b3379bac3b3de84fd', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
            'c52e279531787c972589f7e41ab4ae64', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
            'b6e924de25625d8de591ea690078ad9f', ),
    ], ["VOCdevkit/VOC2012", "VOCdevkit/VOC2007"]),
    'wider_face': ([
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_train.zip',
            '3fedf70df600953d25982bcd13d91ba2', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_val.zip',
            'dfa7d7e790efa35df3788964cf0bbaea', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/wider_face_split.zip',
            'a4a898d6193db4b9ef3260a68bad0dc7', ),
    ], ["WIDER_train", "WIDER_val", "wider_face_split"]),
    'fruit': ([(
        'https://dataset.bj.bcebos.com/PaddleDetection_demo/fruit-detection.tar',
        'ee4a1bf2e321b75b0850cc6e063f79d7', ), ], ["fruit-detection"]),
}
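
# Each DATASETS entry is consumed as
#   url_md5_pairs, sub_dirs = DATASETS[name]
# where every (url, md5sum) pair is downloaded and decompressed, and
# sub_dirs lists the directories expected under DATASET_HOME/<name>.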

DOWNLOAD_RETRY_LIMIT = 3


def get_weights_path(url):
    """Get weights path from WEIGHT_HOME, if not exists,
    download it from url.
    """
    path, _ = get_path(url, WEIGHTS_HOME)
    return path
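
# Illustrative usage (the URL below is hypothetical): the archive is
# downloaded and decompressed under ~/.cache/paddle/weights and the
# resulting local path is returned, e.g.
#   path = get_weights_path('https://example.com/yolov3_darknet.tar')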


def get_dataset_path(path, annotation, image_dir):
    """
    If path exists, return path.
    Otherwise, get dataset path from DATASET_HOME, if not exists,
    download it.
    """
    if _dataset_exists(path, annotation, image_dir):
        return path

    logger.info("Dataset {} is not valid for reason above, try searching {} or "
100
                "downloading dataset...".format(
101 102
                    osp.realpath(path), DATASET_HOME))

    for name, dataset in DATASETS.items():
        if osp.split(path.strip().lower())[-1] == name:
            logger.info("Parse dataset_dir {} as dataset "
                        "{}".format(path, name))
            data_dir = osp.join(DATASET_HOME, name)

            # For VOC, only check the dirs VOCdevkit/VOC2012 and
            # VOCdevkit/VOC2007
            if name == 'voc':
                exists = True
                for sub_dir in dataset[1]:
                    check_dir = osp.join(data_dir, sub_dir)
                    if osp.exists(check_dir):
                        logger.info("Found {}".format(check_dir))
                    else:
                        exists = False
                if exists:
                    return data_dir

            # VOC existence was checked above; reaching this point means
            # it is not present locally, so skip the existence check
            check_exist = name != 'voc'
            for url, md5sum in dataset[0]:
                get_path(url, data_dir, md5sum, check_exist)

            # VOC file lists must be created after download
            if name == 'voc':
                create_voc_list(data_dir)
            return data_dir

    # path did not match any dataset in DATASETS
    raise ValueError("Dataset {} is not valid and cannot parse dataset type "
                     "'{}' for automaticly downloading, which only supports "
W
wangguanzhong 已提交
134 135
                     "'voc', 'coco', 'wider_face' and 'fruit' currently".format(
                         path, osp.split(path)[-1]))
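
# Illustrative usage (paths are hypothetical): a dataset_dir whose last
# component names a known dataset is downloaded automatically, e.g.
#   data_dir = get_dataset_path('dataset/voc', annotation=None,
#                               image_dir=None)
# fetches the VOC archives and returns ~/.cache/paddle/dataset/voc.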


def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
    logger.info("Create voc file list...")
    devkit_dir = osp.join(data_dir, devkit_subdir)
    years = ['2007', '2012']

    # NOTE: auto-downloaded VOC datasets should use the default VOC
    # label list, so do not generate label_list.txt here. For the
    # default labels, see ../data/source/voc_loader.py
    create_list(devkit_dir, years, data_dir)
    logger.info("Create voc file list finished")


def map_path(url, root_dir):
    # map a download url to its decompressed path under root_dir
    fname = osp.split(url)[-1]
    zip_formats = ['.zip', '.tar', '.gz']
    fpath = fname
    for zip_format in zip_formats:
        fpath = fpath.replace(zip_format, '')
    return osp.join(root_dir, fpath)
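
# Example, derived from the DATASETS urls above:
#   map_path('http://images.cocodataset.org/zips/train2017.zip', DATASET_HOME)
# returns osp.join(DATASET_HOME, 'train2017').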


def get_path(url, root_dir, md5sum=None, check_exist=True):
    """ Download from given url to root_dir.
    if file or directory specified by url is exists under
    root_dir, return the path directly, otherwise download
    from url and decompress it, return the path.

    url (str): download url
    root_dir (str): root dir for downloading, it should be
                    WEIGHTS_HOME or DATASET_HOME
    md5sum (str): md5 sum of download package
    """
    # map the url to its decompressed path under root_dir
    fullpath = map_path(url, root_dir)

    # Some archives decompress to a directory whose name differs from
    # the archive name; rename such paths using the following map
    decompress_name_map = {
        "VOCtrainval_11-May-2012": "VOCdevkit/VOC2012",
        "VOCtrainval_06-Nov-2007": "VOCdevkit/VOC2007",
        "VOCtest_06-Nov-2007": "VOCdevkit/VOC2007",
        "annotations_trainval": "annotations"
    }
    for k, v in decompress_name_map.items():
        if fullpath.find(k) >= 0:
            fullpath = osp.join(osp.split(fullpath)[0], v)

    exist_flag = False
    if osp.exists(fullpath) and check_exist:
        exist_flag = True
        logger.info("Found {}".format(fullpath))
    else:
        fullname = _download(url, root_dir, md5sum)
        _decompress(fullname)

    return fullpath, exist_flag
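
# Example, derived from decompress_name_map above: a VOC trainval archive
# resolves to the renamed devkit directory rather than the archive stem, e.g.
#   get_path('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/'
#            'VOCtrainval_11-May-2012.tar', DATASET_HOME)
# returns (osp.join(DATASET_HOME, 'VOCdevkit/VOC2012'), exist_flag).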


def download_dataset(path, dataset=None):
    if dataset not in DATASETS:
        logger.error("Unknown dataset {}, it should be "
                     "{}".format(dataset, DATASETS.keys()))
        return
    dataset_info = DATASETS[dataset][0]
    for info in dataset_info:
        get_path(info[0], path, info[1], False)
    logger.info("Download dataset {} finished.".format(dataset))


def _dataset_exists(path, annotation, image_dir):
    """
    Check if a user-defined dataset exists
    """
    if not osp.exists(path):
        logger.info("Config dataset_dir {} is not exits, "
G
Guanghua Yu 已提交
216
                    "dataset config is not valid".format(path))
217 218
        return False

    if annotation:
        annotation_path = osp.join(path, annotation)
        if not osp.isfile(annotation_path):
            logger.info("Config annotation {} is not a "
                        "file, dataset config is not "
                        "valid".format(annotation_path))
            return False
    if image_dir:
        image_path = osp.join(path, image_dir)
        if not osp.isdir(image_path):
            logger.info("Config image_dir {} is not a "
                        "directory, dataset config is not "
                        "valid".format(image_path))
            return False
    return True


def _download(url, path, md5sum=None):
    """
    Download from url, save to path.

    url (str): download url
    path (str): download to given path
    """
    if not osp.exists(path):
        os.makedirs(path)

    fname = osp.split(url)[-1]
    fullname = osp.join(path, fname)
    retry_cnt = 0

    while not (osp.exists(fullname) and _md5check(fullname, md5sum)):
        if retry_cnt < DOWNLOAD_RETRY_LIMIT:
            retry_cnt += 1
        else:
            raise RuntimeError("Download from {} failed. "
                               "Retry limit reached".format(url))

        logger.info("Downloading {} from {}".format(fname, url))

        req = requests.get(url, stream=True)
        if req.status_code != 200:
            raise RuntimeError("Downloading from {} failed with code "
                               "{}!".format(url, req.status_code))

        # To protect against an interrupted download, write to
        # tmp_fullname first and move it to fullname once the
        # download has finished
        tmp_fullname = fullname + "_tmp"
        total_size = req.headers.get('content-length')
        with open(tmp_fullname, 'wb') as f:
            if total_size:
                for chunk in tqdm.tqdm(
                        req.iter_content(chunk_size=1024),
                        total=(int(total_size) + 1023) // 1024,
                        unit='KB'):
                    f.write(chunk)
            else:
                for chunk in req.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
        shutil.move(tmp_fullname, fullname)

    return fullname


def _md5check(fullname, md5sum=None):
    if md5sum is None:
        return True

    logger.info("File {} md5 checking...".format(fullname))
    md5 = hashlib.md5()
    with open(fullname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            md5.update(chunk)
    calc_md5sum = md5.hexdigest()

    if calc_md5sum != md5sum:
        logger.info("File {} md5 check failed, {}(calc) != "
                    "{}(base)".format(fullname, calc_md5sum, md5sum))
        return False
    return True
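
# Note: a call with md5sum=None always passes, so checksum verification is
# optional (paths below are illustrative):
#   _md5check('/path/to/file')          # -> True, no checksum given
#   _md5check('/path/to/file', 'abcd')  # -> False unless the md5 matches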


def _decompress(fname):
    """
    Decompress zip and tar files
    """
    logger.info("Decompressing {}...".format(fname))

    # To protect against interrupted decompression, decompress into
    # the fpath_tmp directory first; if decompression succeeds, move
    # the files to fpath, delete fpath_tmp, and remove the downloaded
    # archive
    fpath = osp.split(fname)[0]
    fpath_tmp = osp.join(fpath, 'tmp')
    if osp.isdir(fpath_tmp):
        shutil.rmtree(fpath_tmp)
    os.makedirs(fpath_tmp)

    if fname.find('tar') >= 0:
        with tarfile.open(fname) as tf:
            tf.extractall(path=fpath_tmp)
    elif fname.find('zip') >= 0:
        with zipfile.ZipFile(fname) as zf:
            zf.extractall(path=fpath_tmp)
    else:
        raise TypeError("Unsupport compress file type {}".format(fname))

    for f in os.listdir(fpath_tmp):
        src_dir = osp.join(fpath_tmp, f)
        dst_dir = osp.join(fpath, f)
        _move_and_merge_tree(src_dir, dst_dir)

    shutil.rmtree(fpath_tmp)
    os.remove(fname)


def _move_and_merge_tree(src, dst):
    """
G
Guanghua Yu 已提交
339
    Move src directory to dst, if dst is already exists,
340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355
    merge src to dst
    """
    if not osp.exists(dst):
        shutil.move(src, dst)
    else:
        for fp in os.listdir(src):
            src_fp = osp.join(src, fp)
            dst_fp = osp.join(dst, fp)
            if osp.isdir(src_fp):
                if osp.isdir(dst_fp):
                    _move_and_merge_tree(src_fp, dst_fp)
                else:
                    shutil.move(src_fp, dst_fp)
            elif osp.isfile(src_fp) and \
                    not osp.isfile(dst_fp):
                shutil.move(src_fp, dst_fp)
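
# Note: when merging, files already present in dst are kept; only files
# missing from dst are moved, and subdirectories are merged recursively.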