#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from . import core
import numpy
import os
import six
from six.moves import zip, range, xrange
import multiprocessing

from .framework import Variable, default_main_program, _current_expected_place
from .framework import _cpu_num, _cuda_ids
__all__ = ['DataFeeder']


def convert_dtype(dtype):
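    """Map a core.VarDesc.VarType value to the corresponding numpy dtype string."""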
    if dtype == core.VarDesc.VarType.FP32:
        return 'float32'
    elif dtype == core.VarDesc.VarType.INT64:
        return 'int64'
    elif dtype == core.VarDesc.VarType.FP64:
        return 'float64'
    elif dtype == core.VarDesc.VarType.FP16:
        return 'float16'
    elif dtype == core.VarDesc.VarType.INT32:
        return 'int32'
    elif dtype == core.VarDesc.VarType.UINT8:
        return 'uint8'
    else:
        raise ValueError("dtype must be any of [float16, float32, float64, "
                         "int32, int64, uint8]")


class DataToLoDTensorConverter(object):
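    """Accumulates samples for one feed slot and converts them to a LoDTensor.

    Samples are appended via feed() and materialized on the target place by
    done(), which also resets the internal buffers for the next mini-batch.
    """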
    def __init__(self, place, lod_level, shape, dtype):
        self.place = place
        self.lod_level = lod_level
        self.shape = shape
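        # At most one dimension may be unknown (negative). With two or more,
        # the batch cannot be reshaped deterministically, so the declared
        # shape is discarded.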
        negative_count = 0
        for s in self.shape:
            if s < 0:
                negative_count += 1
            if negative_count > 1:
                self.shape = None
                break
        self.dtype = convert_dtype(dtype)
        self._reset()

    def _reset(self):
        self.data = []
        self.lod = [[] for _ in six.moves.range(self.lod_level)]

    def feed(self, data):
        self._feed_impl_(data, self.lod, self.lod_level)

    def _feed_impl_(self, data, lod, lod_level):
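        # lod_level 0 means `data` is a single sample; otherwise record the
        # sequence length at this LoD level and recurse one level deeper.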
        if lod_level == 0:
            self.data.append(data)
        else:
            lod[0].append(len(data))
            for each_data in data:
                self._feed_impl_(each_data, lod[1:], lod_level - 1)

    def _check_shape(self, shape):
        for s1, s2 in zip(self.shape, shape):
            if s1 != s2 and s1 >= 0 and s2 >= 0:
                raise ValueError(
                    "Shape mismatch: the shape defined in the data layer is "
                    "{}, but received {}.".format(self.shape, shape))

    def done(self):
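        # Stack all accumulated samples and, if a complete target shape is
        # known, try to reshape the batch to it before building the LoDTensor.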
        arr = numpy.array(self.data, dtype=self.dtype)
        if self.shape:
            if len(arr.shape) != len(self.shape):
                try:
                    arr = arr.reshape(self.shape)
                except ValueError:
                    raise ValueError(
                        "Reshape error: the shape defined in the data layer is "
                        "{}, but received {}.".format(self.shape, arr.shape))
        t = core.LoDTensor()
        t.set(arr, self.place)
        if self.lod_level > 0:
            t.set_recursive_sequence_lengths(self.lod)
        self._reset()
        return t


class BatchedTensorProvider(object):
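    """Groups samples from `generator` into fixed-size mini-batches of
    LoDTensors, one tensor per feed slot (lod_level 0 only)."""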
    def __init__(self, feed_list, place, batch_size, generator, drop_last):
        self.place = place
        self.batch_size = batch_size
        self.generator = generator
        self.converters = []
        self.drop_last = drop_last

        for var in feed_list:
            assert var.lod_level == 0, "lod_level must be 0"
            self.converters.append(
                DataToLoDTensorConverter(
                    place=self.place,
                    lod_level=0,
                    shape=var.shape,
                    dtype=var.dtype))

    def _done(self):
        return [c.done() for c in self.converters]

    def __call__(self):
        idx = 0
        for each_sample in self.generator():
            for each_slot, each_converter in six.moves.zip(each_sample,
                                                           self.converters):
                each_converter.data.append(each_slot)

            idx += 1
            if idx == self.batch_size:
                idx = 0
                yield self._done()

        if not self.drop_last and idx > 0:
            yield self._done()
        else:
            [c._reset() for c in self.converters]


class DataFeeder(object):
    """
    DataFeeder converts the data returned by a reader into a data
    structure that can be fed into Executor and ParallelExecutor. The reader
    usually returns a list of mini-batch data entries. Each data entry in
    the list is one sample. Each sample is a list or a tuple with one
    feature or multiple features.

    Simple usage is shown below:

    ..  code-block:: python

        import paddle.fluid as fluid
        place = fluid.CPUPlace()
        img = fluid.layers.data(name='image', shape=[1, 28, 28])
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
        result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])


    If you want to feed data to the GPU sides separately in advance when using
    multiple GPUs to train a model, you can use the `decorate_reader` function.

    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid
        
        place = fluid.CUDAPlace(0)
        data = fluid.layers.data(name='data', shape=[3, 224, 224], dtype='float32')
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        
        feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
        reader = feeder.decorate_reader(
                paddle.batch(paddle.dataset.flowers.train(), batch_size=16), multi_devices=True)

    Args:
        feed_list(list): The Variables, or the names of Variables, that will
            be fed into the model.
        place(Place): indicates whether the data is fed on CPU or GPU. If you
            want to feed data onto a GPU, use `fluid.CUDAPlace(i)` (`i` is the
            GPU id); if you want to feed data onto the CPU, use
            `fluid.CPUPlace()`.
        program(Program): The Program into which the data will be fed. If
            program is None, default_main_program() is used. Default None.

    Raises:
        ValueError: If some Variable is not in this Program.

    Examples:
        ..  code-block:: python


            import numpy as np
            import paddle
            import paddle.fluid as fluid
            
            place = fluid.CPUPlace()
            
            def reader():
                yield [np.random.random([4]).astype('float32'), np.random.random([3]).astype('float32')],
            
            main_program = fluid.Program()
            startup_program = fluid.Program()
            
            with fluid.program_guard(main_program, startup_program):
                data_1 = fluid.layers.data(name='data_1', shape=[1, 2, 2])
                data_2 = fluid.layers.data(name='data_2', shape=[1, 1, 3])
                out = fluid.layers.fc(input=[data_1, data_2], size=2)
                # ...
            
            feeder = fluid.DataFeeder([data_1, data_2], place)
                        
            exe = fluid.Executor(place)
            exe.run(startup_program)
            for data in reader():
                outs = exe.run(program=main_program,
                               feed=feeder.feed(data),
                               fetch_list=[out])

    """

    def __init__(self, feed_list, place, program=None):
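        # Record dtype, name, shape and lod_level for every feed target;
        # string entries in feed_list are resolved against the program's
        # global block.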
        self.feed_dtypes = []
        self.feed_names = []
        self.feed_shapes = []
        self.feed_lod_level = []
        if program is None:
            program = default_main_program()
        for each_var in feed_list:
            if isinstance(each_var, six.string_types):
                each_var = program.block(0).var(each_var)
            if not isinstance(each_var, Variable):
                raise TypeError("Feed list should contain a list of Variables")
            self.feed_dtypes.append(each_var.dtype)
            self.feed_names.append(each_var.name)
            self.feed_lod_level.append(each_var.lod_level)
            self.feed_shapes.append(each_var.shape)

        self.place = place

    def feed(self, iterable):
        """
        According to feed_list and iterable, converts the input into
        a data structure that can be fed into Executor and ParallelExecutor.

        Args:
            iterable(list|tuple): the input data.

        Returns:
            dict: the result of conversion.

        Examples:
            ..  code-block:: python

                import numpy.random as random
                import paddle.fluid as fluid
                
                def reader(limit=5):
                    for i in range(limit):
                        yield random.random([784]).astype('float32'), random.random([1]).astype('int64'), random.random([256]).astype('float32')
                
                data_1 = fluid.layers.data(name='data_1', shape=[1, 28, 28])
                data_2 = fluid.layers.data(name='data_2', shape=[1], dtype='int64')
                data_3 = fluid.layers.data(name='data_3', shape=[16, 16], dtype='float32')
                feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace())
                
                result = feeder.feed(reader()) 
        """
        converter = []
        for lod_level, shape, dtype in six.moves.zip(
                self.feed_lod_level, self.feed_shapes, self.feed_dtypes):
            converter.append(
                DataToLoDTensorConverter(
                    place=self.place,
                    lod_level=lod_level,
                    shape=shape,
                    dtype=dtype))

        for each_sample in iterable:
            assert len(each_sample) == len(converter), (
                "The number of fields in data (%d) does not match " +
                "len(feed_list) (%d)") % (len(each_sample), len(converter))
            for each_converter, each_slot in six.moves.zip(converter,
                                                           each_sample):
                each_converter.feed(each_slot)
        ret_dict = {}
        for each_name, each_converter in six.moves.zip(self.feed_names,
                                                       converter):
            ret_dict[each_name] = each_converter.done()
        return ret_dict

    def feed_parallel(self, iterable, num_places=None):
        """
        Takes multiple mini-batches. Each mini-batch will be fed to each
        device in advance.

        Args:
            iterable(list|tuple): the input data.
            num_places(int): the number of devices. Default None.

        Returns:
            generator: a generator yielding one conversion result (dict) per device.

        Notes:
            The number of devices and the number of mini-batches must be the same.

        Examples:
            ..  code-block:: python

                import numpy.random as random
                import paddle.fluid as fluid
                
                def reader(limit=10):
                    for i in range(limit):
                        yield [random.random([784]).astype('float32'), random.random([1]).astype('float32')],
                
                x = fluid.layers.data(name='x', shape=[1, 28, 28])
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                
                fluid.layers.elementwise_add(x, y)
                
                feeder = fluid.DataFeeder(['x','y'], fluid.CPUPlace())
                place_num = 2 
                places = [fluid.CPUPlace() for x in range(place_num)]
                data = []
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                program = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(places=places)
                for item in reader():
                    data.append(item)
                    if place_num == len(data):
                        exe.run(program=program, feed=list(feeder.feed_parallel(data, place_num)), fetch_list=[])
                        data = []
        """
        if isinstance(self.place, core.CUDAPlace):
            places = [
                core.CUDAPlace(i)
                for i in six.moves.xrange(
                    self._get_number_of_places_(num_places))
            ]
        else:
            places = [
                core.CPUPlace()
                for _ in six.moves.xrange(
                    self._get_number_of_places_(num_places))
            ]

        if len(iterable) != len(places):
            raise ValueError("feed_parallel takes multiple mini-batches. Each "
                             "mini-batch will be fed to one device. The "
                             "number of devices and the number of mini-batches "
                             "must be the same.")

        place = self.place
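        # Temporarily retarget self.place so that feed() builds each
        # mini-batch's tensors directly on its destination device.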
        for p, batch in six.moves.zip(places, iterable):
            self.place = p
            yield self.feed(batch)
        self.place = place

    def _get_number_of_places_(self, num_places):
        if num_places is not None:
            return int(num_places)
        elif isinstance(self.place, core.CUDAPlace):
            return len(_cuda_ids())
        else:
            return _cpu_num()

    def decorate_reader(self,
                        reader,
                        multi_devices,
                        num_places=None,
                        drop_last=True):
        """
        Converts the data returned by the reader into multiple mini-batches.
        Each mini-batch will be fed to one device.

        Args:
            reader(function): the function that generates the data.
            multi_devices(bool): whether to use multiple devices or not.
            num_places(int): if multi_devices is True, you can specify the number
                of GPUs to use; if num_places is None, the function will use all the
                GPUs of the current machine. Default None.
            drop_last(bool): whether to drop the last batch if the
                size of the last batch is less than batch_size. Default True.

        Returns:
            generator: a decorated reader which yields the converted mini-batches.

        Raises:
            ValueError: If drop_last is False and the data batches cannot be evenly distributed to all devices.

        Examples:
            ..  code-block:: python

                import numpy.random as random
                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.compiler as compiler
                
                def reader(limit=10):
                    for i in range(limit):
                        yield (random.random([784]).astype('float32'), random.random([1]).astype('int64')),
                
                place = fluid.CUDAPlace(0)
                data = fluid.layers.data(name='data', shape=[1, 28, 28], dtype='float32')
                label = fluid.layers.data(name='label', shape=[1], dtype='int64')
                
                hidden = fluid.layers.fc(input=data, size=10)
                
                feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
                reader = feeder.decorate_reader(reader, multi_devices=True)
                
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                compiled_prog = compiler.CompiledProgram(
                         fluid.default_main_program()).with_data_parallel()
                for i,data in enumerate(reader()):
                    print('iteration : ', i + 1)
                    ret = exe.run(compiled_prog, feed=data, fetch_list=[hidden])
        """

        def __reader_creator__():
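            # Single device: feed one mini-batch at a time. Multiple devices:
            # collect `num` mini-batches and dispatch one to each device.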
            if not multi_devices:
                for item in reader():
                    yield self.feed(item)
            else:
                num = self._get_number_of_places_(num_places)
                item = []
                for batch in reader():
                    item.append(batch)
                    if len(item) == num:
                        yield list(self.feed_parallel(item, num))
                        item = []
                if not drop_last and len(item) != 0:
                    raise ValueError(
                        "The remaining data batches cannot fill all devices "
                        "and would have to be dropped, which is only "
                        "supported when drop_last is True; other strategies "
                        "are not implemented.")

        return __reader_creator__


class NumpyToLoDTensorConverter(object):
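    """Accumulates numpy arrays and converts them into a LoDTensor without LoD."""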
    def __init__(self, place):
        self.place = place
        self._reset()

    def _reset(self):
        self.data = []

    def feed(self, data):
        self.data.append(data)

    def done(self):
        arr = numpy.array(self.data)
        t = core.LoDTensor()
        t.set(arr, self.place)
        self._reset()
        return t


class ListTensorProvider(object):
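    """Converts batches from `generator` into lists of LoDTensors for dygraph
    mode; exactly one place may be specified."""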
    def __init__(self, generator, places):
        self.generator = generator
        self.converters = []
        self.places = []
        if places:
            if not isinstance(places, (list, tuple)):
                places = [places]
            assert len(
                places) == 1, "dygraph mode does not support multiple places."
            for place in places:
                if isinstance(place, (core.CUDAPlace, core.CPUPlace)):
                    self.places.append(place)
                else:
                    raise ValueError(
                        "Please specify a valid place, such as core.CPUPlace "
                        "or core.CUDAPlace")
        if len(self.places) == 0:
            self.places.append(_current_expected_place())

    def _readData(self, iterable, places):
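        # Converters are created lazily on the first sample, so their number
        # always matches the number of slots per sample.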
        for place, each_sample in six.moves.zip(places, iterable):
            for item in each_sample:
                if len(self.converters) < len(item):
                    for _ in item:
                        self.converters.append(NumpyToLoDTensorConverter(place))
                for each_converter, each_slot in six.moves.zip(self.converters,
                                                               item):
                    each_converter.feed(each_slot)
            yield [c.done() for c in self.converters]

    def __call__(self):
        item = []
        for batch in self.generator():
            item.append(batch)
            if len(item) == len(self.places):
                yield list(self._readData(item, self.places))
                item = []