#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numbers
import time
from multiprocessing import Manager

import numpy as np

import paddle
from paddle.distributed import ParallelEnv
from paddle.io import DistributedBatchSampler

from ..utils.registry import Registry

# Registry mapping a dataset name (cfg.name) to a dataset class;
# build_dataloader looks classes up here by name.
DATASETS = Registry("DATASETS")


class DictDataset(paddle.io.Dataset):
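    """Wrap a dict-style dataset for use with paddle.io.DataLoader.

    Values that are numbers or numpy arrays are returned positionally so they
    can be collated into tensors; all other values are kept in
    multiprocessing-shared dicts on this object, keyed by sample index, and
    re-attached to batches by DictDataLoader.
    """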
    def __init__(self, dataset):
        self.dataset = dataset
        self.tensor_keys_set = set()
        self.non_tensor_keys_set = set()
        self.non_tensor_dict = Manager().dict()

        single_item = dataset[0]
        self.keys = single_item.keys()

        for k, v in single_item.items():
            if not isinstance(v, (numbers.Number, np.ndarray)):
                setattr(self, k, Manager().dict())
                self.non_tensor_keys_set.add(k)
            else:
                self.tensor_keys_set.add(k)

    def __getitem__(self, index):
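        """Return the tensor-friendly values of one sample as a tuple.

        Non-tensor values are stashed in the shared per-key dicts; the sample
        index is appended last so they can be looked up again when batches
        are rebuilt.
        """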

        ori_map = self.dataset[index]

        # Numbers and ndarrays are returned positionally so the DataLoader
        # can batch them; everything else is cached in the shared per-key
        # dicts under this sample's index.
        tmp_list = []
        for k, v in ori_map.items():
            if isinstance(v, (numbers.Number, np.ndarray)):
                tmp_list.append(v)
            else:
                getattr(self, k).update({index: v})

        tmp_list.append(index)
        return tuple(tmp_list)

    def __len__(self):
        return len(self.dataset)

    def reset(self):
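        """Drop the cached non-tensor items; called at the start of each epoch."""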
        for k in self.non_tensor_keys_set:
            setattr(self, k, Manager().dict())


class DictDataLoader():
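    """Iterate a dict-style dataset in batches and yield dict batches.

    Tensor fields are batched by paddle.io.DataLoader; non-tensor fields are
    fetched back from the wrapped DictDataset using the per-sample indices
    carried in the last position of each batch.
    """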
    def __init__(self, dataset, batch_size, is_train, num_workers=4):

        self.dataset = DictDataset(dataset)

        # Bind the loader to this process's GPU: the rank-local device under
        # multi-GPU training, otherwise GPU 0.
        place = paddle.CUDAPlace(ParallelEnv().dev_id) \
                    if ParallelEnv().nranks > 1 else paddle.CUDAPlace(0)

        sampler = DistributedBatchSampler(self.dataset,
                                          batch_size=batch_size,
                                          shuffle=is_train,
                                          drop_last=is_train)

        self.dataloader = paddle.io.DataLoader(self.dataset,
                                               batch_sampler=sampler,
                                               places=place,
                                               num_workers=num_workers)

        self.batch_size = batch_size

    def __iter__(self):
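        """Yield one dict per batch, re-attaching the non-tensor fields."""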

        self.dataset.reset()

        for data in self.dataloader:
            return_dict = {}
            j = 0
            for k in self.dataset.keys:
                if k in self.dataset.tensor_keys_set:
                    return_dict[k] = data[j] if isinstance(
                        data, (list, tuple)) else data
                    j += 1
                else:
                    return_dict[k] = self.get_items_by_indexs(k, data[-1])
            yield return_dict

    def __len__(self):
        return len(self.dataloader)

    def get_items_by_indexs(self, key, indexs):
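        """Gather the non-tensor items stored under `key` for a batch of indices."""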
        if isinstance(indexs, paddle.Tensor):
            indexs = indexs.numpy()
        current_items = []
        items = getattr(self.dataset, key)

        for index in indexs:
            current_items.append(items[index])

        return current_items


def build_dataloader(cfg, is_train=True):
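    """Build a DictDataLoader from a dataset config.

    `cfg.name` selects the dataset class registered in DATASETS; `batch_size`
    and `num_workers` are read from the config, defaulting to 1 and 0.
    """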
    dataset = DATASETS.get(cfg.name)(cfg)

    batch_size = cfg.get('batch_size', 1)
    num_workers = cfg.get('num_workers', 0)

    dataloader = DictDataLoader(dataset, batch_size, is_train, num_workers)

    return dataloader
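

# Minimal usage sketch (illustrative only). It assumes the Registry exposes a
# ``register()`` decorator, as in ``..utils.registry``, and that ``cfg`` is an
# attribute-style config with a ``get`` method; the dataset class, config
# values, and batch keys below are hypothetical.
#
#   @DATASETS.register()
#   class ToyDictDataset(paddle.io.Dataset):
#       def __init__(self, cfg):
#           self.size = cfg.get('size', 100)
#
#       def __getitem__(self, idx):
#           return {'image': np.zeros((3, 64, 64), dtype='float32'),
#                   'path': 'img_%d.png' % idx}
#
#       def __len__(self):
#           return self.size
#
#   loader = build_dataloader(cfg, is_train=True)  # cfg.name = 'ToyDictDataset'
#   for batch in loader:
#       images = batch['image']  # batched tensor
#       paths = batch['path']    # list of per-sample strings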