#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle

from paddle.io import DistributedBatchSampler

from .repeat_dataset import RepeatDataset
from ..utils.registry import Registry, build_from_config

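# Registry of dataset classes; build_dataset() looks classes up here by name.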
DATASETS = Registry("DATASETS")


def build_dataset(cfg):
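    """Create a dataset from a config dict.

    Pops 'name' from ``cfg``. If it is 'RepeatDataset', the inner dataset is
    built from ``cfg['dataset']`` and wrapped in RepeatDataset with
    ``cfg['times']`` repetitions; otherwise the class registered under that
    name in DATASETS is instantiated with the remaining entries of ``cfg``
    as keyword arguments.
    """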
    name = cfg.pop('name')

    if name == 'RepeatDataset':
        dataset_ = build_from_config(cfg['dataset'], DATASETS)
        dataset = RepeatDataset(dataset_, cfg['times'])
    else:
        dataset = DATASETS.get(name)(**cfg)

    return dataset


def build_dataloader(cfg, is_train=True, distributed=True):
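    """Create a paddle.io.DataLoader from a config dict.

    Besides the keys consumed by build_dataset(), ``cfg`` may contain
    'batch_size' (default 1), 'num_workers' (default 0) and
    'use_shared_memory' (default True). Shuffling and drop_last follow
    ``is_train``. With ``distributed=True`` a DistributedBatchSampler is
    used and shared memory follows the config; otherwise shared memory is
    disabled.
    """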
    cfg_ = cfg.copy()

    batch_size = cfg_.pop('batch_size', 1)
    num_workers = cfg_.pop('num_workers', 0)
    use_shared_memory = cfg_.pop('use_shared_memory', True)

    dataset = build_dataset(cfg_)

    if distributed:
        sampler = DistributedBatchSampler(dataset,
                                          batch_size=batch_size,
                                          shuffle=is_train,
                                          drop_last=is_train)

        dataloader = paddle.io.DataLoader(dataset,
                                          batch_sampler=sampler,
                                          num_workers=num_workers,
                                          use_shared_memory=use_shared_memory)
    else:
        dataloader = paddle.io.DataLoader(dataset,
                                          batch_size=batch_size,
                                          shuffle=is_train,
                                          drop_last=is_train,
                                          use_shared_memory=False,
                                          num_workers=num_workers)

    return dataloader
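

# Minimal usage sketch (illustrative only: 'MyDataset' and its config keys are
# assumed to be a dataset class registered in DATASETS, not part of this file):
#
#     loader = build_dataloader(
#         {'name': 'MyDataset', 'batch_size': 4, 'num_workers': 2},
#         is_train=True,
#         distributed=False)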