#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib

from .. import core
from ..framework import convert_np_dtype_to_dtype_, default_main_program, default_startup_program, Program
from ..unique_name import generate as unique_name
from control_flow import BlockGuard
from ..layer_helper import LayerHelper
from ..executor import global_scope
from layer_function_generator import generate_layer_fn, templatedoc

__all__ = [
    'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'Recv',
    'open_recordio_file', 'open_files', 'read_file', 'shuffle', 'batch',
    'double_buffer', 'random_data_generator', 'py_reader', 'Preprocessor',
    'load'
]


def data(name,
         shape,
         append_batch_size=True,
         dtype='float32',
         lod_level=0,
         type=core.VarDesc.VarType.LOD_TENSOR,
         stop_gradient=True):
    """
    **Data Layer**

    This function takes in the input and, based on whether the data has
    to be returned as a minibatch, creates a global variable using the
    helper functions. The global variable can be accessed by all the
    following operators in the graph.

    All the input variables of this function are passed in as local variables
    to the LayerHelper constructor.

    Args:
       name(str): The name/alias of the variable.
       shape(list): Tuple declaring the shape.
       append_batch_size(bool): Whether or not to prepend a batch-size
            dimension (-1) to the declared shape.
       dtype(str): The data type: 'float32', 'float16', 'int64', etc.
       type(VarType): The output type. By default it is LOD_TENSOR.
       lod_level(int): The LoD level. 0 means the input data is not a sequence.
       stop_gradient(bool): Whether to stop gradients from flowing through
            this variable.

    Returns:
        Variable: The global variable that gives access to the data.

    Examples:
        .. code-block:: python

          data = fluid.layers.data(name='x', shape=[784], dtype='float32')
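          # Since append_batch_size is True by default, the variable above
          # has shape [-1, 784]: a batch dimension of -1 is prepended to
          # the declared shape.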
    """
    helper = LayerHelper('data', **locals())
    shape = list(shape)
    for i in xrange(len(shape)):
        if shape[i] is None:
            shape[i] = -1
            append_batch_size = False
        elif shape[i] < 0:
            append_batch_size = False

    if append_batch_size:
        shape = [-1] + shape  # append batch size as -1

    data_var = helper.create_global_variable(
        name=name,
        shape=shape,
        dtype=dtype,
        type=type,
        stop_gradient=stop_gradient,
        lod_level=lod_level,
        is_data=True)
    return data_var


class BlockGuardServ(BlockGuard):
    """
    BlockGuardServ class.

    BlockGuardServ is used to create an op with a block in a program.
    """

    def __init__(self, server):
        if not isinstance(server, ListenAndServ):
            raise TypeError("BlockGuardServ takes a ListenAndServ")
        super(BlockGuardServ, self).__init__(server.helper.main_program)
        self.server = server

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            return False

        self.server.complete_op()
        return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb)


class ListenAndServ(object):
    """
    **ListenAndServ Layer**

    ListenAndServ is used to create an RPC server that binds and listens
    on a specific TCP port. The server runs the sub-block when it
    receives variables from clients.

    Args:
        endpoint(string): IP:port string which the server will listen on.
        inputs(list): a list of variables that the server will get from clients.
        fan_in(int): how many clients are expected to report to this server, default: 1.
        optimizer_mode(bool): whether to run the server as a parameter server, default: True.

    Examples:
        .. code-block:: python

            with fluid.program_guard(main):
                serv = layers.ListenAndServ(
                    "127.0.0.1:6170", ["X"], optimizer_mode=False)
                with serv.do():
                    x = layers.data(
                        shape=[32, 32],
                        dtype='float32',
                        name="X",
                        append_batch_size=False)
                    fluid.initializer.Constant(value=1.0)(x, main.global_block())
                    layers.scale(x=x, scale=10.0, out=out_var)

            exe = fluid.Executor(place)
            exe.run(main)
    """

    def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
        self.helper = LayerHelper("listen_and_serv")
        self.inputs = inputs
        self.outputs = []
        self.endpoint = endpoint
        self.fan_in = fan_in
        # FIXME(typhoonzero): add optimizer_mode is stupid, should make it more
        # general.
        self.optimizer_mode = optimizer_mode

    def do(self):
        return BlockGuardServ(self)

    def get_params_and_grads(self):
        main_program = self.helper.main_program
        current_block = main_program.current_block()
        parent_block = self.parent_block()
        # params and grads in the same order.
        params = list()
        grads = list()
        for op in current_block.ops:
            # FIXME(typhoonzero): op.inputs is None if it's cloned.
            if self.optimizer_mode:
                if "Grad" in op.inputs and "Param" in op.inputs:
                    params.append(op.inputs["Param"].name)
                    grads.append(op.inputs["Grad"].name)
            else:
                # simple recv mode, recv operators inputs.
                for iname in op.input_names:
                    for in_var_name in op.input(iname):
                        params.append(parent_block.var(in_var_name))
                        grads.append(parent_block.var(in_var_name))

        return params, grads

    def parent_block(self):
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block

    def complete_op(self):
        main_program = self.helper.main_program
        current_block = main_program.current_block()
        parent_block = self.parent_block()

        parent_block.append_op(
            type='listen_and_serv',
            inputs={"X": self.inputs},
            outputs={},
            attrs={
                'endpoint': self.endpoint,
                'Fanin': self.fan_in,
                'optimize_blocks': [
                    current_block
                ],  # did not support multiple optimize blocks in layers
                'sync_mode': True,  # did not support async now in layers
                'grad_to_block_id': [""]
            })


def Send(endpoints, send_vars, sync=True):
    """
    Send variables to the server side, and receive variables back from
    the server side when the server has finished running the server-side
    program.

    Args:
        endpoints (str): comma separated IP:PORT pairs in the order
                   of send_vars to send
        send_vars (list): variables to send to server
        sync (bool): whether to wait until the request finishes
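
    Examples:
        .. code-block:: python

            # A minimal sketch (assumes a server created with ListenAndServ
            # is already listening at the endpoint):
            x = fluid.layers.data(name='x', shape=[32, 32], dtype='float32')
            fluid.layers.Send("127.0.0.1:6170", [x])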

    """
    assert isinstance(send_vars, list)

    epmap = endpoints.split(",")
    endpoints = list(set(epmap))

    helper = LayerHelper("Send", **locals())
    rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()

    helper.append_op(
        type="send",
        inputs={"X": send_vars},
        attrs={
            "endpoints": endpoints,
            "epmap": epmap,
            rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
        })
    if sync:
        helper.append_op(type="send_barrier", attrs={"endpoints": endpoints})


def Recv(endpoints, get_vars, sync=True):
    """
    Receive variables from the server side.

    Args:
        endpoints (str): comma separated IP:PORT pairs in the order
                   of get_vars to receive
        get_vars (list): vars to get from server after send completes.
        sync (bool): whether to wait until the request finishes

    Returns:
        list: list of received variables
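
    Examples:
        .. code-block:: python

            # A minimal sketch (assumes a server is listening at the
            # endpoint and `x` was previously sent with Send):
            x = fluid.layers.data(name='x', shape=[32, 32], dtype='float32')
            fluid.layers.Recv("127.0.0.1:6170", [x])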
    """
    assert isinstance(get_vars, list)

    epmap = endpoints.split(",")
    endpoints = list(set(epmap))

    helper = LayerHelper("Recv", **locals())
    helper.append_op(
        type="recv",
        inputs={"X": get_vars},
        outputs={"Out": get_vars},
        attrs={"endpoints": endpoints,
               "epmap": epmap})
    if sync:
        helper.append_op(type="fetch_barrier", attrs={"endpoints": endpoints})
    return get_vars


def monkey_patch_reader_methods(reader):
    """Patch reader control methods (currently reset()) onto the reader
    Variable, and mark it persistable with gradient flow stopped."""

    def __get_reader__():
        scope = global_scope()
        var = scope.find_var(reader.name)
        return var.get_reader()

    def reset():
        return __get_reader__().reset()

    reader.reset = reset
    reader.stop_gradient = True
    reader.persistable = True
    return reader


def _copy_reader_var_(block, var):
    new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
    new_var.desc.set_shapes(var.desc.shapes())
    new_var.desc.set_dtypes(var.desc.dtypes())
    new_var.persistable = True
    return new_var


def _copy_reader_create_op_(block, op):
    input_param_names = op.input_names
    new_input_map = {}
    for param_name in input_param_names:
        new_input_map[param_name] = []
        arg_names = op.input(param_name)
        for arg_name in arg_names:
            new_input_map[param_name].append(block.var(arg_name))

    output_param_names = op.output_names
    new_output_map = {}
    for param_name in output_param_names:
        new_output_map[param_name] = []
        arg_names = op.output(param_name)
        for arg_name in arg_names:
            new_output_map[param_name].append(block.var(arg_name))

    new_op = block.append_op(
        type=op.type,
        inputs=new_input_map,
        outputs=new_output_map,
        attrs=op.all_attrs())
    return new_op


@templatedoc(op_type='create_recordio_file_reader')
def open_recordio_file(filename,
                       shapes,
                       lod_levels,
                       dtypes,
                       pass_num=1,
                       for_parallel=True):
    """
    ${comment}

    Args:
       filename(${filename_type}): ${filename_comment}.
       shapes(list): List of tuples declaring the data shapes.
       lod_levels(${lod_levels_type}): ${lod_levels_comment}.
       dtypes(list): List of strings declaring the data types.
       pass_num(int): Number of passes to run.
       for_parallel(Bool): Set it as True if you are going to run
            subsequent operators in parallel.

    Returns:
       ${out_comment}.

    Examples:

        >>> import paddle.fluid as fluid
        >>> reader = fluid.layers.io.open_recordio_file(
        >>>                               filename='./data.recordio',
        >>>                               shapes=[(3,224,224), (1,)],
        >>>                               lod_levels=[0, 0],
        >>>                               dtypes=['float32', 'int64'])
        >>> # Via the reader, we can use 'read_file' layer to get data:
        >>> image, label = fluid.layers.io.read_file(reader)
    """
    dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
    shape_concat = []
    ranks = []

    for shape in shapes:
        shape_concat.extend(shape)
        ranks.append(len(shape))

    var_name = unique_name('open_recordio_file')

    startup_blk = default_startup_program().current_block()
    startup_var = startup_blk.create_var(name=var_name)
    startup_blk.append_op(
        type='create_recordio_file_reader',
        outputs={'Out': [startup_var]},
        attrs={
            'shape_concat': shape_concat,
            'lod_levels': lod_levels,
            'filename': filename,
            'ranks': ranks
        })

    startup_var.desc.set_dtypes(dtypes)
    startup_var.persistable = True
    main_prog_var = _copy_reader_var_(default_main_program().current_block(),
                                      startup_var)

    if pass_num > 1:
        main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num)

    return monkey_patch_reader_methods(main_prog_var)


def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
    """
    Create a uniform random data generator.

    This layer returns a Reader Variable.
    Instead of opening a file and reading data from it, this
    Reader Variable generates float32 uniform random data by itself.
    It can be used as a dummy reader to test a network without
    opening a real file.

    Args:
       low(float): The lower bound of data's uniform distribution.
       high(float): The upper bound of data's uniform distribution.
       shapes(list): List of tuples declaring the data shapes.
       lod_levels(list): List of ints declaring the lod_level of each input.
       for_parallel(Bool): Set it as True if you are going to run
            subsequent operators in parallel.

    Returns:
       Variable: A Reader Variable from which we can get random data.

    Examples:

        .. code-block:: python

            reader = fluid.layers.random_data_generator(
                                             low=0.0,
                                             high=1.0,
                                             shapes=[[3,224,224], [1]],
                                             lod_levels=[0, 0])
            # Via the reader, we can use 'read_file' layer to get data:
            image, label = fluid.layers.read_file(reader)
    """
    dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
    shape_concat = []
    ranks = []

    for shape in shapes:
        shape_concat.extend(shape)
        ranks.append(len(shape))

    var_name = unique_name('random_data_generator')

    startup_blk = default_startup_program().current_block()
    startup_var = startup_blk.create_var(name=var_name)
    startup_blk.append_op(
        type='create_random_data_generator',
        outputs={'Out': [startup_var]},
        attrs={
            'low': low,
            'high': high,
            'shape_concat': shape_concat,
            'lod_levels': lod_levels,
            'ranks': ranks
        })

    startup_var.desc.set_dtypes(dtypes)
    startup_var.persistable = True
    main_prog_var = _copy_reader_var_(default_main_program().current_block(),
                                      startup_var)

    # NOTE: `for_parallel` is retained for API compatibility. The `parallel`
    # reader decorator it used to invoke is no longer defined in this module,
    # so the flag currently has no effect.
    return monkey_patch_reader_methods(main_prog_var)


def py_reader(capacity, shapes, lod_levels, dtypes):
    """
    Create a reader and a blocking queue for data feeding in Python.

    This layer returns a Reader Variable and a BlockingQueue.
    The BlockingQueue provides a `push()` method to push a `LoDTensorArray`
    object into the queue from the Python side. On the C++ side, the Reader
    Variable invokes the queue's `pop()` method to retrieve the feeding
    data. Feeding data on the Python side and fetching data on the C++
    side can thus run in parallel. The BlockingQueue should be closed
    using the `push_eof()` method when it is no longer used.

    Args:
       capacity(int): The maximum capacity of the BlockingQueue.
       shapes(list): List of tuples declaring the data shapes.
       lod_levels(list): List of ints declaring the lod_level of each input.
       dtypes(list): List of strings declaring the data types.

    Returns:
       tuple(Variable, BlockingQueue):
       A Reader Variable from which we can get feeding data.

       A BlockingQueue object for data feeding.

    Examples:

        .. code-block:: python

            reader, queue = fluid.layers.py_reader(
                                             capacity=10,
                                             shapes=[[-1,3,224,224], [-1,1]],
                                             lod_levels=[0, 0],
                                             dtypes=['float32', 'int64'])
            # Via the reader, we can use 'read_file' layer to get data:
            image, label = fluid.layers.read_file(reader)

            # Via the blocking queue, we can feed data using threads
            def feed_data(queue, feed_images, feed_labels):
                for feed_image, feed_label in zip(feed_images, feed_labels):
                    data = core.LoDTensorArray()
                    data.append(feed_image)
                    data.append(feed_label)
                    queue.push(data)

            thread = threading.Thread(target=feed_data, args=(queue, feed_images, feed_labels))
            thread.start()
    """
    dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
    shape_concat = []
    ranks = []

    for shape in shapes:
        shape_concat.extend(shape)
        ranks.append(len(shape))

    queue_name = unique_name('lod_tensor_blocking_queue')
    var = global_scope().var(queue_name)
    feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes)

    startup_blk = default_startup_program().current_block()
    startup_var = startup_blk.create_var(name=unique_name('create_py_reader'))
    startup_blk.append_op(
        type='create_py_reader',
        inputs={'blocking_queue': queue_name},
        outputs={'Out': [startup_var]},
        attrs={
            'shape_concat': shape_concat,
            'lod_levels': lod_levels,
            'ranks': ranks
        })

    startup_var.desc.set_dtypes(dtypes)
    startup_var.persistable = True

    main_prog_var = _copy_reader_var_(default_main_program().current_block(),
                                      startup_var)

    return monkey_patch_reader_methods(main_prog_var), feed_queue


def open_files(filenames,
               shapes,
               lod_levels,
               dtypes,
               thread_num=1,
               buffer_size=None,
               pass_num=1,
               for_parallel=True):
    """
    Open files.

    This layer takes a list of files to read from and returns a Reader
    Variable. Via the Reader Variable, we can get data from the given files.
    All files must have name suffixes indicating their formats, e.g.,
    '*.recordio'.

    Args:
       filenames(list): The list of file names.
       shapes(list): List of tuples declaring the data shapes.
       lod_levels(list): List of ints declaring the lod_level of each input.
       dtypes(list): List of strings declaring the data types.
       thread_num(int): The maximal concurrent prefetch thread number.
       buffer_size(int|None): The size of the prefetch buffer. If it is set
            to None, the buffer size will be thread_num * 3.
            Default: None
       pass_num(int): Number of passes to run.
       for_parallel(Bool): Set it as True if you are going to run
            subsequent operators in parallel.
            Default: True

    Returns:
       Variable: A Reader Variable via which we can get file data.

    Examples:
       .. code-block:: python

         reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
                                                     './data2.recordio'],
                                             shapes=[(3,224,224), (1,)],
                                             lod_levels=[0, 0],
                                             dtypes=['float32', 'int64'],
                                             thread_num=2,
                                             buffer_size=2)

         # Via the reader, we can use 'read_file' layer to get data:
         image, label = fluid.layers.io.read_file(reader)
    """
    if buffer_size is None:
        buffer_size = thread_num * 3
    if isinstance(filenames, basestring):
        filenames = [filenames]
    dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
    shape_concat = []
    ranks = []

    for shape in shapes:
        shape_concat.extend(shape)
        ranks.append(len(shape))

    multi_file_reader_name = unique_name('multi_file_reader')
    startup_blk = default_startup_program().current_block()
    startup_reader = startup_blk.create_var(name=multi_file_reader_name)
    startup_blk.append_op(
        type='open_files',
        outputs={'Out': [startup_reader]},
        attrs={
            'shape_concat': shape_concat,
            'lod_levels': lod_levels,
            'ranks': ranks,
            'file_names': filenames,
            'thread_num': thread_num,
            'buffer_size': buffer_size
        })

    startup_reader.desc.set_dtypes(dtypes)
    startup_reader.persistable = True
    main_prog_reader = _copy_reader_var_(default_main_program().current_block(),
                                         startup_reader)
    if pass_num > 1:
        main_prog_reader = multi_pass(
            reader=main_prog_reader, pass_num=pass_num)

    return monkey_patch_reader_methods(main_prog_reader)


def __create_shared_decorated_reader__(op_type, reader, attrs):
    var_name = unique_name(op_type)
    startup_blk = default_startup_program().current_block()
    startup_var = startup_blk.create_var(name=var_name)
    startup_op = startup_blk.append_op(
        type=op_type,
        inputs={'UnderlyingReader': reader},
        outputs={'Out': [startup_var]},
        attrs=attrs)
    startup_var.persistable = True
    main_prog_block = default_main_program().current_block()
    main_prog_var = _copy_reader_var_(main_prog_block, startup_var)
    _copy_reader_create_op_(main_prog_block, startup_op)
    return monkey_patch_reader_methods(main_prog_var)


def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
    new_reader_name = name if name is not None else unique_name(op_type)
    main_blk = default_main_program().current_block()
    new_reader = main_blk.create_var(name=new_reader_name)
    main_blk.append_op(
        type=op_type,
        inputs={'UnderlyingReader': reader},
        outputs={'Out': [new_reader]},
        attrs=attrs)
    return monkey_patch_reader_methods(new_reader)


def shuffle(reader, buffer_size):
    """
    Decorate the given reader with 'shuffling': instances read from the
    underlying reader are cached in a buffer of `buffer_size` instances
    and yielded in a shuffled order.

    Args:
        reader(Variable): The reader to be decorated with 'shuffling'.
        buffer_size(int): The number of instances the shuffle buffer holds.

    Returns:
        Variable: The reader which has been decorated with 'shuffling'.
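
    Examples:
        .. code-block:: python

            # A minimal sketch; `raw_reader` is assumed to be a reader
            # created beforehand, e.g. via fluid.layers.io.open_files().
            shuffled_reader = fluid.layers.shuffle(reader=raw_reader,
                                                   buffer_size=100)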
    """
    return __create_unshared_decorated_reader__(
        'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})


def batch(reader, batch_size):
    """
    This layer is a reader decorator. It takes a reader and adds
    'batching' decoration on it. When reading with the resulting
    decorated reader, output data will be automatically organized
    into batches.

    Args:
        reader(Variable): The reader to be decorated with 'batching'.
        batch_size(int): The batch size.

    Returns:
        Variable: The reader which has been decorated with 'batching'.

    Examples:
        .. code-block:: python

            raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
                                                           './data2.recordio'],
                                                    shapes=[(3,224,224), (1,)],
                                                    lod_levels=[0, 0],
                                                    dtypes=['float32', 'int64'],
                                                    thread_num=2,
                                                    buffer_size=2)
            batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)

            # If we read data with the raw_reader:
            #     data = fluid.layers.read_file(raw_reader)
            # We can only get data instance by instance.
            #
            # However, if we read data with the batch_reader:
            #     data = fluid.layers.read_file(batch_reader)
            # Every 5 adjacent instances will be automatically combined together
            # to become a batch. So what we get ('data') is a batch of data
            # instead of a single instance.
    """
    return __create_unshared_decorated_reader__(
        'create_batch_reader', reader, {'batch_size': int(batch_size)})


def double_buffer(reader, place=None, name=None):
    """
    Wrap a double buffer reader. The data will be copied to the target place
    with a double buffer queue. If the target place is None, the place the
    executor runs on will be used.

    Args:
        reader(Variable): the reader variable to be wrapped.
        place(Place): the place of the target data. Default is the same place
            the executor runs on.

        name(str): Variable name. None if the user does not care.

    Returns:
        Variable: wrapped reader with double buffer.

    Examples:

        >>> reader = fluid.layers.open_files(filenames=['somefile'],
        >>>                                  shapes=[[-1, 784], [-1, 1]],
        >>>                                  dtypes=['float32', 'int64'])
        >>> reader = fluid.layers.double_buffer(reader)
        >>> img, label = fluid.layers.read_file(reader)
    """
    attrs = dict()
    if place is not None:
        attrs['place'] = str(place).upper()
    return __create_unshared_decorated_reader__(
        'create_double_buffer_reader', reader, attrs, name=name)


def multi_pass(reader, pass_num):
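    """
    Decorate the given reader to run for `pass_num` passes: data from the
    underlying reader is yielded repeatedly until `pass_num` passes finish.

    Examples:
        .. code-block:: python

            # A minimal sketch; `raw_reader` is assumed to be a reader
            # created beforehand, e.g. via fluid.layers.io.open_files().
            multi_pass_reader = fluid.layers.io.multi_pass(reader=raw_reader,
                                                           pass_num=10)
    """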
    return __create_shared_decorated_reader__(
        'create_multi_pass_reader', reader, {'pass_num': int(pass_num)})


def read_file(reader):
    """
    Execute the given reader and get data via it.

    A reader is also a Variable. It can be a raw reader generated by
    `fluid.layers.open_files()` or a decorated one generated by
    `fluid.layers.double_buffer()` and so on.

    Args:

        reader(Variable): The reader to execute.

    Returns:

        Tuple[Variable]: Data read via the given reader.

    Examples:
        .. code-block:: python

            data_file = fluid.layers.open_files(
                filenames=['mnist.recordio'],
                shapes=[(-1, 784), (-1, 1)],
                lod_levels=[0, 0],
                dtypes=["float32", "int64"])
            data_file = fluid.layers.double_buffer(
                fluid.layers.batch(data_file, batch_size=64))
            input, label = fluid.layers.read_file(data_file)
    """
    helper = LayerHelper('read_file')
    out = [
        helper.create_tmp_variable(
            stop_gradient=True, dtype='float32')
        for _ in range(len(reader.desc.shapes()))
    ]
    helper.append_op(
        type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
    if len(out) == 1:
        return out[0]
    else:
        return out


class Preprocessor(object):
    """
    A block for data pre-processing in a reader.

    Args:
        reader (Variable): A reader variable.
        name (str, default None): The name of the reader.

    Examples:
          .. code-block:: python

            preprocessor = fluid.layers.io.Preprocessor(reader=reader)
            with preprocessor.block():
                img, lbl = preprocessor.inputs()
                img_out = img / 2
                lbl_out = lbl + 1
                preprocessor.outputs(img_out, lbl_out)

            data_file = fluid.layers.io.double_buffer(preprocessor())

    """
    BEFORE_SUB_BLOCK = 0
    IN_SUB_BLOCK = 1
    AFTER_SUB_BLOCK = 2

    def __init__(self, reader, name=None):
        self.underlying_reader = reader
        new_reader_name = name if name is not None else unique_name(
            "create_custom_reader")
        self.main_prog = default_main_program()
        self.reader = self.main_prog.current_block().create_var(
            name=new_reader_name)
        self.sub_block = None
        self.source_var_names = None
        self.sink_var_names = None
        self.status = Preprocessor.BEFORE_SUB_BLOCK

    def is_completed(self):
        return self.sub_block and self.source_var_names and self.sink_var_names

    @contextlib.contextmanager
    def block(self):
        self.status = Preprocessor.IN_SUB_BLOCK
        self.sub_block = self.main_prog.create_block()
        yield
        self.main_prog.rollback()
        self.status = Preprocessor.AFTER_SUB_BLOCK
        if not self.is_completed():
            raise RuntimeError(
                "The definition of preprocessor is incomplete! "
                "Please make sure that you have set input and output "
                "variables by invoking 'inputs' and 'outputs' in "
                "Preprocessor's sub-block.")

    def inputs(self):
        if self.status != Preprocessor.IN_SUB_BLOCK:
            raise RuntimeError(
                "Preprocessor.inputs() can only be invoked inside the sub-block."
            )

        source_shapes = self.underlying_reader.desc.shapes()
        source_dtypes = self.underlying_reader.desc.dtypes()
        source_lod_levels = self.underlying_reader.desc.lod_levels()
        self.source_var_names = [
            unique_name("preprocessor_source")
            for _ in xrange(len(source_shapes))
        ]
        source_vars = []
        for var_name, shape, dtype, lod_level in zip(
                self.source_var_names, source_shapes, source_dtypes,
                source_lod_levels):
            source_vars.append(self.main_prog.current_block().create_var(
                name=var_name, shape=shape, dtype=dtype, lod_level=lod_level))
        return source_vars

    def outputs(self, *outs):
        if self.status != Preprocessor.IN_SUB_BLOCK:
            raise RuntimeError(
                "Preprocessor.outputs() can only be invoked inside the sub-block."
            )
        self.sink_var_names = [var.name for var in outs]

    def __call__(self, *args, **kwargs):
        if self.status != Preprocessor.AFTER_SUB_BLOCK:
            raise RuntimeError(
                "Preprocessor output can only be retrieved after its "
                "sub-block has been defined.")

        self.main_prog.current_block().append_op(
            type="create_custom_reader",
            inputs={'UnderlyingReader': self.underlying_reader},
            outputs={'Out': [self.reader]},
            attrs={
                "sub_block": self.sub_block,
                "source_var_names": self.source_var_names,
                "sink_var_names": self.sink_var_names
            })
        return monkey_patch_reader_methods(self.reader)


@templatedoc()
def load(out, file_path, load_as_fp16=None):
    """
    ${comment}

    >>> import paddle.fluid as fluid
    >>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
    >>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")

    Args:
        out(${out_type}): ${out_comment}.

        file_path(${file_path_type}): ${file_path_comment}.

        load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.

    Returns:
        None
    """
    helper = LayerHelper("load", **locals())
    attrs = {"file_path": file_path}
    if load_as_fp16 is not None:
        attrs['load_as_fp16'] = load_as_fp16
    helper.append_op(type="load", inputs={}, outputs={"Out": out}, attrs=attrs)