#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from . import core
import numpy
import os
import six
from six.moves import zip, range, xrange
import multiprocessing

from .framework import Variable, default_main_program

__all__ = ['DataFeeder']


class DataToLoDTensorConverter(object):
    def __init__(self, place, lod_level, shape, dtype):
        self.place = place
        self.lod_level = lod_level
        self.shape = shape
        # A shape may contain at most one unknown (negative) dimension;
        # with more than one, the target shape is ambiguous, so disable
        # the reshape check entirely.
        negative_count = 0
        for s in self.shape:
            if s < 0:
                negative_count += 1
            if negative_count > 1:
                self.shape = None
                break
        if dtype == core.VarDesc.VarType.FP32:
            self.dtype = 'float32'
        elif dtype == core.VarDesc.VarType.INT64:
            self.dtype = 'int64'
        elif dtype == core.VarDesc.VarType.FP64:
            self.dtype = 'float64'
        elif dtype == core.VarDesc.VarType.FP16:
            self.dtype = 'float16'
        elif dtype == core.VarDesc.VarType.INT32:
            self.dtype = 'int32'
        elif dtype == core.VarDesc.VarType.UINT8:
            self.dtype = 'uint8'
        else:
            raise ValueError("dtype must be any of [int32, float32, int64, "
F
fengjiayi 已提交
55
                             "float64, uint8]")

        self.data = []
        self.lod = []

        for i in six.moves.range(lod_level):
            self.lod.append([])

    def feed(self, data):
        self._feed_impl_(data, self.lod, self.lod_level)

    def _feed_impl_(self, data, lod, lod_level):
        # Recursively flatten nested sequences, recording the length of
        # each (sub-)sequence at every LoD level along the way.
        if lod_level == 0:
            self.data.append(data)
        else:
            lod[0].append(len(data))
            for each_data in data:
                self._feed_impl_(each_data, lod[1:], lod_level - 1)

    def _check_shape(self, shape):
        for s1, s2 in zip(self.shape, shape):
            if s1 != s2 and s1 >= 0 and s2 >= 0:
                raise ValueError(
                    "Shape mismatch. The shape defined in the data layer is "
                    "{}, but received {}.".format(self.shape, shape))

    def done(self):
        arr = numpy.array(self.data, dtype=self.dtype)
        if self.shape:
            if len(arr.shape) != len(self.shape):
                try:
                    arr = arr.reshape(self.shape)
                except ValueError:
                    raise ValueError(
                        "Reshape error. The shape defined in the data layer "
                        "is {}, but received {}.".format(self.shape,
                                                         arr.shape))
            else:
                self._check_shape(arr.shape)
        t = core.LoDTensor()
        t.set(arr, self.place)
        if self.lod_level > 0:
            t.set_recursive_sequence_lengths(self.lod)
        return t


class DataFeeder(object):
    """
    DataFeeder converts the data returned by a reader into a data
    structure that can be fed into Executor and ParallelExecutor. The reader
    usually returns a list of mini-batch data entries. Each data entry in
    the list is one sample. Each sample is a list or a tuple with one
    feature or multiple features.

    A simple usage example is shown below:

    ..  code-block:: python

        place = fluid.CPUPlace()
        img = fluid.layers.data(name='image', shape=[1, 28, 28])
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
        result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])


    If you want to feed data to each GPU separately in advance when using
    multiple GPUs to train a model, you can use the `decorate_reader` function.

    ..  code-block:: python

        place=fluid.CUDAPlace(0)
        feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
        reader = feeder.decorate_reader(
            paddle.batch(flowers.train(), batch_size=16), multi_devices=True)

    Args:
        feed_list(list): The Variables or Variables' names that will be
            fed into the model.
        place(Place): place indicates whether to feed the data into the CPU
            or a GPU. To feed data into a GPU, use `fluid.CUDAPlace(i)` (`i`
            represents the GPU id); to feed data into the CPU, use
            `fluid.CPUPlace()`.
        program(Program): The Program that the data will be fed into; if
            program is None, default_main_program() is used. Default None.

    Raises:
        ValueError: If some Variable is not in this Program.

    Examples:
        .. code-block:: python

            # ...
            place = fluid.CPUPlace()
            feed_list = [
                main_program.global_block().var(var_name) for var_name in feed_vars_name
            ] # feed_vars_name is a list of variables' names.
            feeder = fluid.DataFeeder(feed_list, place)
            for data in reader():
                outs = exe.run(program=main_program,
                               feed=feeder.feed(data))
    """

    def __init__(self, feed_list, place, program=None):
        self.feed_dtypes = []
        self.feed_names = []
        self.feed_shapes = []
        self.feed_lod_level = []
        if program is None:
            program = default_main_program()
        for each_var in feed_list:
            if isinstance(each_var, six.string_types):
                each_var = program.block(0).var(each_var)
            if not isinstance(each_var, Variable):
                raise TypeError("Feed list should contain a list of Variables")
            self.feed_dtypes.append(each_var.dtype)
            self.feed_names.append(each_var.name)
            self.feed_lod_level.append(each_var.lod_level)
            self.feed_shapes.append(each_var.shape)

        self.place = place

    def feed(self, iterable):
        """
        According to feed_list and iterable, this converts the input into
        a data structure that can be fed into Executor and ParallelExecutor.

        Args:
            iterable(list|tuple): the input data.

        Returns:
            dict: the result of conversion.
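
        Examples:
            A minimal usage sketch; `feeder` is assumed to be a DataFeeder
            built as in the class-level example above (a [1, 28, 28] 'image'
            variable and an int64 'label' variable):

            .. code-block:: python

                feed_dict = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])
                # feed_dict maps each variable name to a LoDTensor.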
        """
        converter = []
        for lod_level, shape, dtype in six.moves.zip(
                self.feed_lod_level, self.feed_shapes, self.feed_dtypes):
            converter.append(
                DataToLoDTensorConverter(
                    place=self.place,
                    lod_level=lod_level,
                    shape=shape,
                    dtype=dtype))

        for each_sample in iterable:
            assert len(each_sample) == len(converter), (
                "The number of fields in data (%s) does not match " +
                "len(feed_list) (%s)") % (len(each_sample), len(converter))
            for each_converter, each_slot in six.moves.zip(converter,
                                                           each_sample):
                each_converter.feed(each_slot)
        ret_dict = {}
        for each_name, each_converter in six.moves.zip(self.feed_names,
                                                       converter):
            ret_dict[each_name] = each_converter.done()
        return ret_dict

    def feed_parallel(self, iterable, num_places=None):
        """
        Takes multiple mini-batches. Each mini-batch will be fed to one
        device in advance.

        Args:
            iterable(list|tuple): the input data.
            num_places(int): the number of devices. Default None.

        Returns:
            generator: a generator that yields one converted feed dict per
                device.

        Notes:
            The number of devices and the number of mini-batches must be
            the same.
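
        Examples:
            A minimal sketch; `feeder`, `batch0` and `batch1` are assumed
            (hypothetically) to be a CUDA-place DataFeeder and two
            mini-batches of samples:

            .. code-block:: python

                for feed_dict in feeder.feed_parallel(
                        [batch0, batch1], num_places=2):
                    pass  # one feed dict per device, in place order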
        """
        if isinstance(self.place, core.CUDAPlace):
            places = [
                core.CUDAPlace(i)
                for i in six.moves.xrange(
                    self._get_number_of_places_(num_places))
            ]
        else:
            places = [
                core.CPUPlace()
                for _ in six.moves.xrange(
                    self._get_number_of_places_(num_places))
            ]

        if len(iterable) != len(places):
            raise ValueError("feed_parallel takes multiple mini-batches. Each "
                             "mini-batch will be fed to one device. The "
                             "number of devices and the number of "
                             "mini-batches must be the same.")

        place = self.place
        for p, batch in six.moves.zip(places, iterable):
            self.place = p
            yield self.feed(batch)
        self.place = place

    def _get_number_of_places_(self, num_places):
        if num_places is not None:
            return int(num_places)
        elif isinstance(self.place, core.CUDAPlace):
            return core.get_cuda_device_count()
        else:
            cpu_num = int(
                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            return cpu_num

    def decorate_reader(self,
                        reader,
                        multi_devices,
                        num_places=None,
                        drop_last=True):
        """
        Converts the data returned by `reader` into multiple mini-batches,
        one per device, so that each mini-batch can be fed to one device.

        Args:
            reader(function): the reader function that generates the data.
            multi_devices(bool): whether to use multiple devices or not.
            num_places(int): if multi_devices is True, you can specify the
                number of GPUs to use; if num_places is None, all the GPUs
                of the current machine will be used. Default None.
            drop_last(bool): whether to drop the last batch if its size is
                less than batch_size. Default True.

        Returns:
            function: a decorated reader that yields the converted feed data.

        Raises:
            ValueError: If drop_last is False and the remaining mini-batches
                cannot be evenly distributed across the devices.
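
        Examples:
            A minimal sketch, assuming `data`, `label` and the `flowers`
            dataset are defined as in the class-level example above:

            .. code-block:: python

                feeder = fluid.DataFeeder(place=fluid.CUDAPlace(0),
                                          feed_list=[data, label])
                reader = feeder.decorate_reader(
                    paddle.batch(flowers.train(), batch_size=16),
                    multi_devices=True)
                for feed_dicts in reader():
                    pass  # one list of feed dicts per iteration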
        """

        def __reader_creator__():
            if not multi_devices:
                for item in reader():
                    yield self.feed(item)
            else:
                num = self._get_number_of_places_(num_places)
                item = []
                for batch in reader():
                    item.append(batch)
                    if len(item) == num:
                        yield list(self.feed_parallel(item, num))
                        item = []
                if not drop_last and len(item) != 0:
                    raise ValueError(
                        "Keeping the leftover mini-batches that cannot fill "
                        "all devices (drop_last=False) is not implemented. "
                        "Other strategies are not implemented either.")

        return __reader_creator__