#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from . import core
import numpy as np
import os
import six
from six.moves import zip, range, xrange
import multiprocessing
import warnings

from .framework import Variable, default_main_program, _current_expected_place, in_dygraph_mode
from .framework import _cpu_num, _cuda_ids
__all__ = ['DataFeeder']


def convert_dtype(dtype):
    if isinstance(dtype, core.VarDesc.VarType):
        if dtype == core.VarDesc.VarType.BOOL:
            return 'bool'
        elif dtype == core.VarDesc.VarType.FP16:
            return 'float16'
        elif dtype == core.VarDesc.VarType.FP32:
            return 'float32'
        elif dtype == core.VarDesc.VarType.FP64:
            return 'float64'
        elif dtype == core.VarDesc.VarType.INT8:
            return 'int8'
        elif dtype == core.VarDesc.VarType.INT16:
            return 'int16'
        elif dtype == core.VarDesc.VarType.INT32:
            return 'int32'
        elif dtype == core.VarDesc.VarType.INT64:
            return 'int64'
        elif dtype == core.VarDesc.VarType.UINT8:
            return 'uint8'
    elif isinstance(dtype, type):
        if dtype in [
                np.bool, np.float16, np.float32, np.float64, np.int8, np.int16,
                np.int32, np.int64, np.uint8
        ]:
            return dtype.__name__
    else:
        if dtype in [
                'bool', 'float16', 'float32', 'float64', 'int8', 'int16',
                'int32', 'int64', 'uint8', u'bool', u'float16', u'float32',
                u'float64', u'int8', u'int16', u'int32', u'int64', u'uint8'
        ]:
            # This cast is slightly risky, since casting a non-ASCII unicode
            # string to str can raise an error in Python 2.
            # But since the set of names above is limited to ASCII, it is
            # safe in practice. Still, jointly supporting Python 2 and
            # Python 3 (and maybe Python 4) may remain a long-lasting problem.
            return str(dtype)

    raise ValueError(
        "dtype must be any of [bool, float16, float32, float64, int8, int16, "
        "int32, int64, uint8]")


def check_variable_and_dtype(input,
                             input_name,
                             expected_dtype,
                             op_name,
                             extra_message=''):
    check_type(input, input_name, (Variable, core.VarBase), op_name,
               extra_message)
    check_dtype(input.dtype, input_name, expected_dtype, op_name, extra_message)


def check_type(input, input_name, expected_type, op_name, extra_message=''):
    # NOTE [ Why skip dynamic graph check ]:
    # 1. If the input type / dtype of a layer is wrong, it will be reported
    # directly on the offending line, so the user can easily locate and print
    # the relevant information. Debugging is easier there, so there is no
    # need to check in dynamic graph mode.
    # 2. Performance considerations. Because these checks would be executed
    # at every step in dynamic graph mode, they would add a heavy performance burden.
    if in_dygraph_mode():
        return
    if not isinstance(input, expected_type):
        raise TypeError(
            "The type of '%s' in %s must be %s, but received %s. %s" %
            (input_name, op_name, expected_type, type(input), extra_message))


def check_dtype(input_dtype,
                input_name,
                expected_dtype,
                op_name,
                extra_message=''):
    # See NOTE [ Why skip dynamic graph check ]
    if in_dygraph_mode():
        return
    if convert_dtype(input_dtype) in ['float16']:
        warnings.warn(
            "The data type of '%s' in %s is float16, which is currently only supported on GPU. %s"
            % (input_name, op_name, extra_message))
    if convert_dtype(input_dtype) not in expected_dtype:
        raise TypeError(
            "The data type of '%s' in %s must be %s, but received %s. %s" %
            (input_name, op_name, expected_dtype, convert_dtype(input_dtype),
             extra_message))
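

# NOTE: an illustrative sketch of the checkers above; 'demo_op' is a
# hypothetical op name used only for this example (static graph mode).
def _check_dtype_demo():
    # A dtype contained in the expected list passes silently.
    check_dtype('float32', 'x', ['float32', 'float64'], 'demo_op')
    try:
        check_dtype('int32', 'x', ['float32'], 'demo_op')
    except TypeError:
        pass  # a dtype outside the expected list raises TypeError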


class DataToLoDTensorConverter(object):
    def __init__(self, place, lod_level, shape, dtype):
        self.place = place
        self.lod_level = lod_level
        self.shape = shape
        negative_count = 0
        for s in self.shape:
            if s < 0:
                negative_count += 1
            if negative_count > 1:
                self.shape = None
                break
        self.dtype = convert_dtype(dtype)
        self._reset()

    def _reset(self):
        self.data = []
        self.lod = [[] for _ in six.moves.range(self.lod_level)]

    def feed(self, data):
        self._feed_impl_(data, self.lod, self.lod_level)

    def _feed_impl_(self, data, lod, lod_level):
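        # Recursively walk the nested sequence: at every LoD level, record the
        # length of each sub-sequence in the corresponding lod list, then
        # descend until lod_level == 0, where the raw sample itself is stored.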
        if lod_level == 0:
            self.data.append(data)
        else:
            lod[0].append(len(data))
            for each_data in data:
                self._feed_impl_(each_data, lod[1:], lod_level - 1)

    def _check_shape(self, shape):
        for s1, s2 in zip(self.shape, shape):
            if s1 != s2 and s1 >= 0 and s2 >= 0:
                raise ValueError(
                    "Shape does not match. The shape defined in the data layer is {}, but received {}".
                    format(self.shape, shape))

    def done(self):
        arr = np.array(self.data, dtype=self.dtype)
        if self.shape:
            if len(arr.shape) != len(self.shape):
                try:
                    arr = arr.reshape(self.shape)
                except ValueError:
                    raise ValueError(
                        "Reshape error. The shape defined in the data layer is {}, but received {}"
                        .format(self.shape, arr.shape))
        t = core.LoDTensor()
        t.set(arr, self.place)
        if self.lod_level > 0:
            t.set_recursive_sequence_lengths(self.lod)
        self._reset()
        return t
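

# NOTE: an illustrative sketch of DataToLoDTensorConverter (internal class).
# The leading -1 in the shape stands for the batch dimension, just as it does
# for feed variables created by fluid.data.
def _converter_demo():
    converter = DataToLoDTensorConverter(
        place=core.CPUPlace(), lod_level=0, shape=[-1, 2], dtype='float32')
    converter.feed([1.0, 2.0])
    converter.feed([3.0, 4.0])
    return converter.done()  # a 2x2 core.LoDTensor holding both samples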


class BatchedTensorProvider(object):
    def __init__(self, feed_list, place, batch_size, generator, drop_last):
        self.place = place
        self.batch_size = batch_size
        self.generator = generator
        self.converters = []
        self.drop_last = drop_last

        for var in feed_list:
            assert var.lod_level == 0, "lod_level must be 0"
            self.converters.append(
                DataToLoDTensorConverter(
                    place=self.place,
                    lod_level=0,
                    shape=var.shape,
                    dtype=var.dtype))

    def _done(self):
        return [c.done() for c in self.converters]

    def __call__(self):
        idx = 0
        for each_sample in self.generator():
            for each_slot, each_converter in six.moves.zip(each_sample,
                                                           self.converters):
                each_converter.data.append(each_slot)

            idx += 1
            if idx == self.batch_size:
                idx = 0
                yield self._done()

        if not self.drop_last and idx > 0:
            yield self._done()
        else:
            for c in self.converters:
                c._reset()
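

# NOTE: an illustrative sketch of BatchedTensorProvider (internal class).
# _FakeVar is a hypothetical stand-in for a feed Variable; real callers pass
# Variables taken from a Program's feed list.
def _batched_provider_demo():
    class _FakeVar(object):
        lod_level = 0
        shape = (-1, 2)
        dtype = core.VarDesc.VarType.FP32

    def _gen():
        for i in range(5):
            yield [np.ones([2], dtype='float32') * i]

    provider = BatchedTensorProvider(
        feed_list=[_FakeVar()],
        place=core.CPUPlace(),
        batch_size=2,
        generator=_gen,
        drop_last=True)
    return list(provider())  # two batches; the odd fifth sample is dropped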


class DataFeeder(object):
    """
    :api_attr: Static Graph
    
    DataFeeder converts the data returned by a reader into a data
    structure that can be fed into Executor. The reader is usually a 
    Python generator that returns lists of mini-batch data entries. 

    Parameters:
        feed_list (list): Variables or names of Variables that need
            to be fed.
        place (:ref:`api_fluid_CPUPlace` | :ref:`api_fluid_CUDAPlace` ): 
            place indicates the device (CPU | GPU) the data will be fed into. If 
            you want to feed data into GPU, please use :code:`fluid.CUDAPlace(i)` 
            (:code:`i` represents the GPU id); if you want to feed data into CPU, 
            please use :code:`fluid.CPUPlace()`.
        program (:ref:`api_fluid_Program` , optional): The Program that the data 
            will be fed into; if program is None, it will use default_main_program(). 
            Default None.

    Raises:
        :code:`ValueError` - If some Variables are not in this Program.

    Example:
        ..  code-block:: python

            import numpy as np
            import paddle
            import paddle.fluid as fluid
            
            place = fluid.CPUPlace()
            def reader():
                for _ in range(4):
                    yield np.random.random([4]).astype('float32'), np.random.random([3]).astype('float32'),
            
            main_program = fluid.Program()
            startup_program = fluid.Program()
            
            with fluid.program_guard(main_program, startup_program):
                data_1 = fluid.data(name='data_1', shape=[None, 2, 2], dtype='float32')
                data_2 = fluid.data(name='data_2', shape=[None, 1, 3], dtype='float32')
                out = fluid.layers.fc(input=[data_1, data_2], size=2)
                # ...
            feeder = fluid.DataFeeder([data_1, data_2], place)
            
            exe = fluid.Executor(place)
            exe.run(startup_program)
            
            feed_data = feeder.feed(reader())
            
            # print feed_data to view feed results
            # print(feed_data['data_1'])
            # print(feed_data['data_2'])
            
            outs = exe.run(program=main_program,
                            feed=feed_data,
                            fetch_list=[out])
            print(outs)

    """

    def __init__(self, feed_list, place, program=None):
        self.feed_dtypes = []
        self.feed_names = []
        self.feed_shapes = []
        self.feed_lod_level = []
        if program is None:
            program = default_main_program()
        for each_var in feed_list:
            if isinstance(each_var, six.string_types):
                each_var = program.block(0).var(each_var)
            if not isinstance(each_var, Variable):
                raise TypeError("Feed list should contain a list of Variables")
            self.feed_dtypes.append(each_var.dtype)
            self.feed_names.append(each_var.name)
            self.feed_lod_level.append(each_var.lod_level)
            self.feed_shapes.append(each_var.shape)

        self.place = place

    def feed(self, iterable):
        """
        According to :code:`feed_list` of :code:`DataFeeder` and :code:`iterable` , converts 
        the input into a data structure that can be fed into Executor.

        Parameters:
            iterable (generator): user-defined Python generator that reads the raw input data

        Returns: 
            :code:`dict`: a :code:`dict` that contains (variable name - converted tensor) pairs

        Example:
            ..  code-block:: python

                # In this example, the reader (a generator) returns a list of 3 ndarrays per sample.
                # The feed API converts each ndarray input into a tensor.
                # The returned result is a dict with keys: data_1, data_2, data_3.
                # result['data_1'] is a LoD-Tensor with shape [5, 2, 1, 3]; 5 is the batch size, and [2, 1, 3] is the real shape of data_1.
                # result['data_2'] and result['data_3'] are similar.
                import numpy as np
                import paddle.fluid as fluid
                
                def reader(limit=5):
                    for i in range(1, limit + 1):
                        yield np.ones([6]).astype('float32') * i , np.ones([1]).astype('int64') * i, np.random.random([9]).astype('float32')
                
                data_1 = fluid.data(name='data_1', shape=[None, 2, 1, 3])
                data_2 = fluid.data(name='data_2', shape=[None, 1], dtype='int64')
                data_3 = fluid.data(name='data_3', shape=[None, 3, 3], dtype='float32')
                feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace())
                
                
                result = feeder.feed(reader())
                print(result['data_1'])
                print(result['data_2'])
                print(result['data_3'])

        """
        converter = []
        for lod_level, shape, dtype in six.moves.zip(
                self.feed_lod_level, self.feed_shapes, self.feed_dtypes):
            converter.append(
                DataToLoDTensorConverter(
                    place=self.place,
                    lod_level=lod_level,
                    shape=shape,
                    dtype=dtype))

        for each_sample in iterable:
            assert len(each_sample) == len(converter), (
                "The number of fields in data (%d) does not match " +
                "len(feed_list) (%d)") % (len(each_sample), len(converter))
            for each_converter, each_slot in six.moves.zip(converter,
                                                           each_sample):
                each_converter.feed(each_slot)
        ret_dict = {}
        for each_name, each_converter in six.moves.zip(self.feed_names,
                                                       converter):
            ret_dict[each_name] = each_converter.done()
        return ret_dict

    def feed_parallel(self, iterable, num_places=None):
        """
        Similar to the feed function, feed_parallel is used with multiple devices (CPU|GPU).
        Here :code:`iterable` is a list of Python generators. The data returned by each
        generator in the list will be fed into a separate device.

        Parameters:
            iterable (list|tuple): list of user-defined Python generators. The element 
                number should match :code:`num_places`.
            num_places (int, optional): the number of devices. If not provided (None), 
                all available devices on the machine will be used. Default None.

        Returns: 
            :code:`generator`: a :code:`generator` that generates dicts of (variable name - converted tensor) pairs; 
            the total number of dicts generated matches :code:`num_places`

        .. note::
            The number of devices (:code:`num_places`) must equal the number of generators (elements of :code:`iterable`)

        Example:
            ..  code-block:: python

                import numpy as np
                import paddle.fluid as fluid

                def generate_reader(batch_size, base=0, factor=1):
                    def _reader():
                        for i in range(batch_size):
                            yield np.ones([4]) * factor + base, np.ones([4]) * factor + base + 5
                    return _reader()

                x = fluid.data(name='x', shape=[None, 2, 2])
                y = fluid.data(name='y', shape=[None, 2, 2], dtype='float32')

                z = fluid.layers.elementwise_add(x, y)

                feeder = fluid.DataFeeder(['x','y'], fluid.CPUPlace())
                place_num = 2
                places = [fluid.CPUPlace() for x in range(place_num)]
                data = []
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                program = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(places=places)

                # print a sample feed_parallel result
                # for item in list(feeder.feed_parallel([generate_reader(5, 0, 1), generate_reader(3, 10, 2)], 2)):
                #     print(item['x'])
                #     print(item['y'])

                reader_list = [generate_reader(5, 0, 1), generate_reader(3, 10, 2)]
                res = exe.run(program=program, feed=list(feeder.feed_parallel(reader_list, 2)), fetch_list=[z])
                print(res)

        """
        if isinstance(self.place, core.CUDAPlace):
            places = [
                core.CUDAPlace(i)
                for i in six.moves.xrange(
                    self._get_number_of_places_(num_places))
            ]
        else:
            places = [
                core.CPUPlace()
                for _ in six.moves.xrange(
                    self._get_number_of_places_(num_places))
            ]

        if len(iterable) != len(places):
            raise ValueError("feed_parallel takes multiple mini-batches. Each "
                             "mini-batch will be feed on each device. The "
                             "number of devices and number of mini-batches "
                             "must be same.")

        place = self.place
        for p, batch in six.moves.zip(places, iterable):
            self.place = p
            yield self.feed(batch)
        self.place = place

    def _get_number_of_places_(self, num_places):
        if num_places is not None:
            return int(num_places)
        elif isinstance(self.place, core.CUDAPlace):
            return len(_cuda_ids())
        else:
            return _cpu_num()

    def decorate_reader(self,
                        reader,
                        multi_devices,
                        num_places=None,
                        drop_last=True):
        """
        Decorate the reader (generator) to fit multiple devices. The reader generates
        multiple mini-batches. Each mini-batch will be fed into a single device.

        Parameters:
            reader(generator): a user-defined Python generator used to get a :code:`mini-batch` of data.
                A :code:`mini-batch` can be regarded as a Python generator that returns batches of input 
                entities, just like :code:`_mini_batch` in the code example below.
            multi_devices(bool): indicates whether to use multiple devices or not.
            num_places(int, optional): if :code:`multi_devices` is True, you can specify the number
                of devices(CPU|GPU) to use; if num_places is None, the function will use all the
                devices of the current machine. Default None.
            drop_last(bool, optional): whether to drop the last round of data if it is not enough to 
                feed all devices. Default True.

        Returns: 
            :code:`generator`: a new :code:`generator` which returns converted dicts that can be fed into Executor

        Raises:
            :code:`ValueError`: If drop_last is False and the data cannot fit devices perfectly.

        Example:
            ..  code-block:: python

                import numpy as np
                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.compiler as compiler
                
                def reader():
                    def _mini_batch(batch_size):
                        for i in range(batch_size):
                            yield np.random.random([16]).astype('float32'), np.random.randint(10, size=[1])

                    for _ in range(10):
                        yield _mini_batch(np.random.randint(1, 10))
                
                place_num = 3
                places = [fluid.CPUPlace() for _ in range(place_num)]
                
                # a simple network sample
                data = fluid.data(name='data', shape=[None, 4, 4], dtype='float32')
                label = fluid.data(name='label', shape=[None, 1], dtype='int64')
                hidden = fluid.layers.fc(input=data, size=10)
                
                feeder = fluid.DataFeeder(place=places[0], feed_list=[data, label])
                reader = feeder.decorate_reader(reader, multi_devices=True, num_places=3, drop_last=True)
                
                exe = fluid.Executor(places[0])
                exe.run(fluid.default_startup_program())
                compiled_prog = compiler.CompiledProgram(
                         fluid.default_main_program()).with_data_parallel(places=places)
                
                for i,data in enumerate(reader()):
                    # print data if you like
                    # print(i, data)
                    ret = exe.run(compiled_prog, feed=data, fetch_list=[hidden])
                    print(ret)

        """

        def __reader_creator__():
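            # Single-device path: convert each mini-batch with feed().
            # Multi-device path: group `num` mini-batches and feed one to each
            # device via feed_parallel().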
            if not multi_devices:
                for item in reader():
                    yield self.feed(item)
            else:
                num = self._get_number_of_places_(num_places)
                item = []
                for batch in reader():
                    item.append(batch)
                    if len(item) == num:
                        yield list(self.feed_parallel(item, num))
                        item = []
                if not drop_last and len(item) != 0:
                    raise ValueError(
                        "Keeping the last incomplete round of mini-batches "
                        "(drop_last=False) is not implemented; no other "
                        "strategy is implemented either.")

        return __reader_creator__