#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
from . import core
16
import numpy as np
C
chengduoZH 已提交
17
import os
Y
yuyang18 已提交
18
import multiprocessing
19
import warnings
Y
Yu Yang 已提交
20

21 22 23 24 25 26 27
from .framework import (
    Variable,
    default_main_program,
    _current_expected_place,
    _non_static_mode,
    _in_eager_without_dygraph_check,
)
C
chengduo 已提交
28
from .framework import _cpu_num, _cuda_ids
29

Y
Yu Yang 已提交
30 31
__all__ = ['DataFeeder']

L
Leo Chen 已提交
32 33 34
# Mapping from Paddle's VarDesc dtype enum to the equivalent NumPy dtype name.
# NOTE: BF16 maps to 'uint16' because NumPy has no native bfloat16 type;
# Paddle stores bfloat16 values in uint16 buffers with the same bit layout.
_PADDLE_DTYPE_2_NUMPY_DTYPE = {
    core.VarDesc.VarType.BOOL: 'bool',
    core.VarDesc.VarType.FP16: 'float16',
    core.VarDesc.VarType.BF16: 'uint16',
    core.VarDesc.VarType.FP32: 'float32',
    core.VarDesc.VarType.FP64: 'float64',
    core.VarDesc.VarType.INT8: 'int8',
    core.VarDesc.VarType.INT16: 'int16',
    core.VarDesc.VarType.INT32: 'int32',
    core.VarDesc.VarType.INT64: 'int64',
    core.VarDesc.VarType.UINT8: 'uint8',
    core.VarDesc.VarType.COMPLEX64: 'complex64',
    core.VarDesc.VarType.COMPLEX128: 'complex128',
}

Y
Yu Yang 已提交
47

S
sneaxiy 已提交
48
def convert_dtype(dtype):
    """Normalize a dtype specification to a NumPy dtype name string.

    Accepts a Paddle ``core.VarDesc.VarType`` enum value, a NumPy scalar
    type (or builtin ``bool``), an ``np.dtype`` instance, or a string name.
    Raises TypeError for anything unsupported.
    """
    if isinstance(dtype, core.VarDesc.VarType):
        # Paddle framework enum: look up its NumPy counterpart.
        numpy_name = _PADDLE_DTYPE_2_NUMPY_DTYPE.get(dtype)
        if numpy_name is not None:
            return numpy_name
    elif isinstance(dtype, type):
        # NumPy scalar types (and builtin bool) carry the target name directly.
        scalar_types = (
            bool,
            np.float16,
            np.uint16,
            np.float32,
            np.float64,
            np.int8,
            np.int16,
            np.int32,
            np.int64,
            np.uint8,
            np.complex64,
            np.complex128,
        )
        if dtype in scalar_types:
            return dtype.__name__
    else:
        # This branch handles np.dtype instances and plain strings.
        # NOTE(SigureMo): an np.dtype object is not an instance of type, so it
        # is not handled by the previous branch; it compares equal to its name
        # string, so one membership test covers both cases, and str() below
        # normalizes it.
        dtype_names = (
            'bool',
            'float16',
            'uint16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'complex64',
            'complex128',
        )
        if dtype in dtype_names:
            return str(dtype)
        # NOTE(zhangbo): NumPy does not support bfloat16; Paddle uses uint16
        # to represent bfloat16, and their binary layouts are consistent.
        if dtype == 'bfloat16':
            return 'uint16'

    raise TypeError(
        "dtype must be any of [bool, float16, uint16, float32, float64, int8, int16, "
        "int32, int64, uint8, complex64, complex128, bfloat16], but received %s"
        % dtype
    )
S
sneaxiy 已提交
98 99


100 101 102
def check_variable_and_dtype(
    input, input_name, expected_dtype, op_name, extra_message=''
):
    """Verify that ``input`` is a Variable whose dtype is in ``expected_dtype``.

    Raises TypeError (via the two helpers) on either violation.
    """
    # Validate the container type first so that reading ``input.dtype``
    # below is guaranteed to be safe.
    check_type(input, input_name, Variable, op_name, extra_message)
    check_dtype(input.dtype, input_name, expected_dtype, op_name, extra_message)


def check_type(input, input_name, expected_type, op_name, extra_message=''):
    """Raise TypeError if ``input`` is not an instance of ``expected_type``.

    Only effective in static graph mode; dynamic graph mode returns
    immediately (see the NOTE below). Under @to_static transformation,
    eager Tensors are additionally accepted.
    """
    # NOTE [ Why skip dynamic graph check ]:
    # 1. If the input type / dtype of a layer is wrong, it will be reported
    # directly on that line. User can easily print the relevant information
    # on which line. It is easier to debug, so there is no need to check
    # in dynamic graph mode.
    # 2. Performance considerations. Because these checks are executed at
    # each step in dynamic graph mode, it will bring a heavy performance burden.
    if _non_static_mode():
        return

    # NOTE: `in_declarative_mode` is used to determine whether this op is called under
    # @to_static in transformation from dygraph to static layer. We add Tensor in
    # expected_type to skip checking because Tensor may be created and used in unusual way.
    from .dygraph.base import in_declarative_mode

    # TODO: needs a better design to fix this.
    if in_declarative_mode():
        if not isinstance(expected_type, tuple):
            expected_type = (expected_type,)
        expected_type += (core.eager.Tensor,)
    elif isinstance(input, core.eager.Tensor):
        # An eager Tensor reaching a static-graph check means the caller mixed
        # imperative data into graph construction without enabling dygraph.
        raise TypeError(
            "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
            "Because received '{}' in {} is a imperative Variable.".format(
                input_name, op_name
            )
        )
    if not isinstance(input, expected_type):
        raise TypeError(
            "The type of '%s' in %s must be %s, but received %s. %s"
            % (input_name, op_name, expected_type, type(input), extra_message)
        )
140 141


142 143 144
def check_dtype(
    input_dtype, input_name, expected_dtype, op_name, extra_message=''
):
    """Warn on dtypes with limited device support and raise TypeError when
    ``input_dtype`` is not among ``expected_dtype``.
    """
    # See NOTE [ Why skip dynamic graph check ]
    if _non_static_mode():
        return
    # convert_dtype is pure, so normalize once up front instead of per check.
    normalized = convert_dtype(input_dtype)
    if normalized == 'float16':
        warnings.warn(
            "The data type of '%s' in %s only support float16 in GPU now. %s"
            % (input_name, op_name, extra_message)
        )
    # Ops known to handle bfloat16 under OneDNN; anything else gets a warning.
    bf16_capable_ops = ('reshape', 'lookup_table', 'scale')
    if normalized == 'uint16' and op_name not in bf16_capable_ops:
        warnings.warn(
            "The data type of '%s' in %s only support bfloat16 in OneDNN now. %s"
            % (input_name, op_name, extra_message)
        )
    if normalized not in expected_dtype:
        raise TypeError(
            "The data type of '%s' in %s must be %s, but received %s. %s"
            % (
                input_name,
                op_name,
                expected_dtype,
                normalized,
                extra_message,
            )
        )


def check_shape(
    shape,
    op_name,
    expected_shape_type=(list, tuple, Variable),
    expected_element_type=(int, Variable),
    expected_tensor_dtype=('int32', 'int64'),
):
    """Validate a ``shape`` argument of ``op_name``.

    ``shape`` may be a list/tuple of ints and/or Variables, or a single
    Variable; Variable dims must carry one of ``expected_tensor_dtype``.
    """
    # See NOTE [ Why skip dynamic graph check ]
    if _non_static_mode():
        return
    check_type(shape, 'shape', expected_shape_type, op_name)
    shape_is_tensor = isinstance(shape, Variable)
    if expected_element_type is not None and not shape_is_tensor:
        for dim in shape:
            check_type(dim, 'element of shape', expected_element_type, op_name)
            # Only tensor-valued dims need a dtype check.
            if expected_tensor_dtype is None or not isinstance(dim, Variable):
                continue
            check_dtype(
                dim.dtype,
                'element of shape',
                expected_tensor_dtype,
                op_name,
                'If element of shape is Tensor, its data type should be {}'.format(
                    ', '.join(expected_tensor_dtype)
                ),
            )
    if expected_tensor_dtype is not None and shape_is_tensor:
        check_dtype(shape.dtype, 'shape', expected_tensor_dtype, op_name)


203
class DataToLoDTensorConverter:
    """Accumulates raw samples for a single feed slot and materializes the
    whole batch as a LoDTensor when ``done()`` is called.
    """

    def __init__(self, place, lod_level, shape, dtype):
        self.place = place
        self.lod_level = lod_level
        self.shape = shape
        # With two or more unknown (-1) dimensions the target shape is
        # ambiguous, so shape-based reshaping is disabled entirely.
        num_negative_dims = sum(1 for dim in self.shape if dim < 0)
        if num_negative_dims > 1:
            self.shape = None
        self.dtype = convert_dtype(dtype)
        self._reset()

    def _reset(self):
        # Drop buffered samples and per-level sequence-length records.
        self.data = []
        self.lod = [[] for _ in range(self.lod_level)]

    def feed(self, data):
        """Buffer one sample (possibly nested, for LoD data)."""
        self._feed_impl_(data, self.lod, self.lod_level)

    def _feed_impl_(self, data, lod, lod_level):
        # Recursively flatten nested sequences, recording each sub-sequence
        # length at its corresponding LoD level.
        if lod_level == 0:
            self.data.append(data)
            return
        lod[0].append(len(data))
        for sub_sequence in data:
            self._feed_impl_(sub_sequence, lod[1:], lod_level - 1)

    def _check_shape(self, shape):
        # Every statically known (non-negative) dimension must agree with the
        # declared shape; -1 dims are wildcards.
        for declared, received in zip(self.shape, shape):
            if declared >= 0 and received >= 0 and declared != received:
                raise ValueError(
                    "Shape not match. What is defined in data layer is {}, but receive {}".format(
                        self.shape, shape
                    )
                )

    def done(self):
        """Convert the buffered batch into a LoDTensor and reset the buffers."""
        arr = np.array(self.data, dtype=self.dtype)
        if self.shape and len(arr.shape) != len(self.shape):
            try:
                arr = arr.reshape(self.shape)
            except ValueError:
                raise ValueError(
                    "Reshape error. What is defined in data layer is {}, but receive {}".format(
                        self.shape, arr.shape
                    )
                )
        tensor = core.LoDTensor()
        tensor.set(arr, self.place)
        if self.lod_level > 0:
            tensor.set_recursive_sequence_lengths(self.lod)
        self._reset()
        return tensor


262
class BatchedTensorProvider:
    """Groups samples produced by ``generator`` into batches of
    ``batch_size`` and yields each batch as a list of LoDTensors, one per
    feed variable.

    All feed variables must be dense (lod_level == 0). When ``drop_last``
    is True an incomplete trailing batch is discarded; otherwise it is
    yielded as a smaller final batch.
    """

    def __init__(self, feed_list, place, batch_size, generator, drop_last):
        self.place = place
        self.batch_size = batch_size
        self.generator = generator
        self.converters = []
        self.drop_last = drop_last

        for var in feed_list:
            assert var.lod_level == 0, "lod_level must be 0"
            self.converters.append(
                DataToLoDTensorConverter(
                    place=self.place,
                    lod_level=0,
                    shape=var.shape,
                    dtype=var.dtype,
                )
            )

    def _done(self):
        # Flush every converter, producing one tensor per feed variable.
        return [c.done() for c in self.converters]

    def __call__(self):
        idx = 0
        for each_sample in self.generator():
            for each_slot, each_converter in zip(each_sample, self.converters):
                each_converter.data.append(each_slot)

            idx += 1
            if idx == self.batch_size:
                idx = 0
                yield self._done()

        if not self.drop_last and idx > 0:
            # Emit the final, smaller batch.
            yield self._done()
        else:
            # Discard any buffered tail samples. (Was a side-effect list
            # comprehension; a plain loop states the intent.)
            for c in self.converters:
                c._reset()


301
class DataFeeder:
    """
    :api_attr: Static Graph

    DataFeeder converts the data that returned by a reader into a data
    structure that can feed into Executor. The reader is usually a
    python generator that returns a list of mini-batch data entries.

    Parameters:
        feed_list (list): Variables or names of Variables that need
            to feed.
        place (:ref:`api_fluid_CPUPlace` | :ref:`api_fluid_CUDAPlace` ):
            place indicates the device (CPU | GPU) the data will be fed into, if
            you want to feed data into GPU, please using :code:`fluid.CUDAPlace(i)`
            (:code:`i` represents the GPU id), or if you want to feed data into CPU,
            please using :code:`fluid.CPUPlace()`.
        program (:ref:`api_fluid_Program` , optional): The Program that will
            feed data into, if program is None, it will use default_main_program().
            Default None.

    Raises:
        :code:`ValueError` - If some Variables are not in this Program.

    Example:
        ..  code-block:: python

            import numpy as np
            import paddle
            import paddle.fluid as fluid

            place = fluid.CPUPlace()
            def reader():
                for _ in range(4):
                    yield np.random.random([4]).astype('float32'), np.random.random([3]).astype('float32'),

            main_program = fluid.Program()
            startup_program = fluid.Program()

            with fluid.program_guard(main_program, startup_program):
                data_1 = paddle.static.data(name='data_1', shape=[None, 2, 2], dtype='float32')
                data_2 = paddle.static.data(name='data_2', shape=[None, 1, 3], dtype='float32')
                out = paddle.static.nn.fc(x=[data_1, data_2], size=2)
                # ...
            feeder = fluid.DataFeeder([data_1, data_2], place)

            exe = fluid.Executor(place)
            exe.run(startup_program)

            feed_data = feeder.feed(reader())

            # print feed_data to view feed results
            # print(feed_data['data_1'])
            # print(feed_data['data_2'])

            outs = exe.run(program=main_program,
                            feed=feed_data,
                            fetch_list=[out])
            print(outs)

    """

F
fengjiayi 已提交
362
    def __init__(self, feed_list, place, program=None):
        """Record the name, dtype, shape and LoD level of every feed variable.

        Entries of ``feed_list`` may be Variables or variable names, which
        are looked up in ``program`` (``default_main_program()`` when
        ``program`` is None).
        """
        self.feed_dtypes = []
        self.feed_names = []
        self.feed_shapes = []
        self.feed_lod_level = []
        if program is None:
            program = default_main_program()
        for item in feed_list:
            # Names are resolved against the program's global (0th) block.
            var = program.block(0).var(item) if isinstance(item, str) else item
            if not isinstance(var, Variable):
                raise TypeError("Feed list should contain a list of variable")
            self.feed_dtypes.append(var.dtype)
            self.feed_names.append(var.name)
            self.feed_lod_level.append(var.lod_level)
            self.feed_shapes.append(var.shape)

        self.place = place

    def feed(self, iterable):
        """
        According to :code:`feed_list` of :code:`DataFeeder` and :code:`iterable` , converts
        the input into a data structure that can feed into Executor.

        Parameters:
            iterable (generator): user defined python generator to read the raw input data

        Returns:
            :code:`dict`: a :code:`dict` that contains (variable name - converted tensor) pairs

        Example:
            ..  code-block:: python

                # In this example, reader - generator will return a list of ndarray of 3 elements
                # feed API will convert each ndarray input into a tensor
                # the return result is a dict with keys: data_1, data_2, data_3
                # result['data_1']  a LoD-Tensor with shape of  [5, 2, 1, 3]. 5 is batch size, and [2, 1, 3] is the real shape of data_1.
                # result['data_2'], result['data_3'] are similar.
                import numpy as np
                import paddle.fluid as fluid

                def reader(limit=5):
                    for i in range(1, limit + 1):
                        yield np.ones([6]).astype('float32') * i , np.ones([1]).astype('int64') * i, np.random.random([9]).astype('float32')

                data_1 = paddle.static.data(name='data_1', shape=[None, 2, 1, 3])
                data_2 = paddle.static.data(name='data_2', shape=[None, 1], dtype='int64')
                data_3 = paddle.static.data(name='data_3', shape=[None, 3, 3], dtype='float32')
                feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace())


                result = feeder.feed(reader())
                print(result['data_1'])
                print(result['data_2'])
                print(result['data_3'])

        """
        # One converter per feed variable, in feed_list order.
        converters = [
            DataToLoDTensorConverter(
                place=self.place,
                lod_level=lod_level,
                shape=shape,
                dtype=dtype,
            )
            for lod_level, shape, dtype in zip(
                self.feed_lod_level, self.feed_shapes, self.feed_dtypes
            )
        ]

        for sample in iterable:
            assert len(sample) == len(converters), (
                "The number of fields in data (%d) does not match "
                + "len(feed_list) (%d)"
            ) % (len(sample), len(converters))
            for converter, slot in zip(converters, sample):
                converter.feed(slot)

        # Finalize each converter into a tensor keyed by variable name.
        return {
            name: converter.done()
            for name, converter in zip(self.feed_names, converters)
        }
Y
yuyang18 已提交
443 444 445 446 447

    def _get_number_of_places_(self, num_places):
        if num_places is not None:
            return int(num_places)
        elif isinstance(self.place, core.CUDAPlace):
C
chengduo 已提交
448
            return len(_cuda_ids())
Y
yuyang18 已提交
449
        else:
C
chengduo 已提交
450
            return _cpu_num()