# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..wrapped_decorator import signature_safe_contextmanager

from .layer_function_generator import templatedoc
from .tensor import assign, cast, fill_constant
from .. import core
from ..framework import (
    Program,
    Variable,
    Operator,
    _non_static_mode,
    static_only,
    _in_legacy_dygraph,
    in_dygraph_mode,
)
from ..layer_helper import LayerHelper, unique_name
from .utils import (
    assert_same_structure,
    map_structure,
    hold_mutable_vars,
    copy_mutable_vars,
    padding_to_same_structure,
    is_sequence,
    pack_sequence_as,
    flatten,
    to_sequence,
)
import numpy
import warnings
from functools import reduce, partial
from ..data_feeder import (
    convert_dtype,
    check_variable_and_dtype,
    check_type,
    check_dtype,
)
from ..backward import _infer_var_data_type_shape_
import paddle
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'While',
    'Switch',
    'increment',
    'array_write',
    'create_array',
    'less_than',
    'less_equal',
    'greater_than',
    'greater_equal',
    'equal',
    'not_equal',
    'array_read',
    'array_length',
    'cond',
    'IfElse',
    'DynamicRNN',
    'StaticRNN',
    'reorder_lod_tensor_by_rank',
    'Print',
    'Assert',
    'is_empty',
    'case',
    'switch_case',
    'while_loop',
]


def select_output(input, outputs, mask):
    """
    **select_output**

    This API takes in one input, multiple outputs, and an integer mask. It
    selects the output specified by the mask and copies the input to the
    selected output. It is useful in control flow.

    Args:
        input(Variable): The input variable
        outputs(tuple|list): The output variables
        mask(Variable): A tensor containing a single integer that selects
            the output to which the input is copied

    Returns:
        Variable: The output variables
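
    Examples:
        A minimal, illustrative sketch (``select_output`` is an internal
        helper used by control-flow APIs such as ``cond``; the names below
        are only examples):

        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers.control_flow import select_output

            x = fluid.layers.fill_constant(shape=[1], dtype='float32', value=3.0)
            outs = [
                fluid.layers.create_tensor(dtype='float32'),
                fluid.layers.create_tensor(dtype='float32'),
            ]
            mask = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
            # copies x into outs[1], the branch picked by mask
            select_output(x, outs, mask)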
    """
    helper = LayerHelper('select_output', **locals())
    check_type(input, 'input', (Variable), 'select_output')
    check_variable_and_dtype(mask, 'mask', ['int32'], 'select_output')
    check_type(outputs, 'outputs', (list, tuple), 'select_output')

    helper.append_op(
        type='select_output',
        inputs={'X': input, 'Mask': mask},
        outputs={'Out': outputs},
    )
    return outputs


def _select_input_infer_shape(first_shape, second_shape):
    """
    This function infer the output shape by following algorithm:
    1. if the dims is different, raise a error.
    2. compare axis one by one:
        if a == b: we set axis to a
        if a != b: we set axis to -1
    for compatibility,non declarative mode, we just return second_shape.
    """
    if len(first_shape) != len(second_shape):
        warnings.warn(
            f"the input shapes of select_input should have the same rank, but get {first_shape}, {second_shape}"
        )
        return second_shape
    out_shape = list(
        map(lambda a, b: a if a == b else -1, first_shape, second_shape)
    )
    return out_shape


def select_input(inputs, mask):
    """
    **select_input**

    This API takes in multiple inputs and uses an integer mask to select one
    input to output. It is useful in control flow.

    Args:
        inputs(tuple|list): The input variables
        mask(Variable): A tensor containing a single integer that selects
            which input is forwarded to the output

    Returns:
        Variable: The selected input variable
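
    Examples:
        A minimal, illustrative sketch (``select_input`` is an internal
        helper used by control-flow APIs such as ``cond``; the names below
        are only examples):

        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers.control_flow import select_input

            x0 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
            x1 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
            mask = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
            # forwards x1, the input picked by mask
            out = select_input([x0, x1], mask)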
    """
    helper = LayerHelper('select_input', **locals())
    check_type(inputs, 'inputs', (list, tuple), 'select_input')
    check_variable_and_dtype(mask, 'mask', ['int32'], 'select_input')

    # select_input should expand the shapes: if a dim is -1 in one input and a
    # valid number in the other, use -1; if the ranks differ, a warning is
    # issued and the second shape is used (see _select_input_infer_shape).
    # assert inputs[0].dtype == inputs[1].dtype, f"Expect the inputs should have the same dtype, but get {inputs[0].dtype} and {inputs[1].dtype}"
    output_shape = _select_input_infer_shape(inputs[0].shape, inputs[1].shape)
    output_dtype = inputs[1].dtype
    output_type = inputs[1].type

    out = helper.create_variable(
        dtype=output_dtype, shape=output_shape, type=output_type
    )
    helper.append_op(
        type='select_input',
        inputs={'X': inputs, 'Mask': mask},
        outputs={'Out': out},
    )
    return out


def select_input_with_buildin_type(inputs, mask, name):
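    # NOTE: internal variant of ``select_input`` used by ``cond``: besides
    # Variables it also accepts Python builtin values (bool/float/int) and
    # ``UndefinedVar`` returned from the two branches.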
    from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import (
        to_static_variable,
    )
    from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar

    false_var, true_var = inputs

    if isinstance(false_var, UndefinedVar) and isinstance(
        true_var, UndefinedVar
    ):
        """None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None."""
        return None

    if isinstance(false_var, Variable) and isinstance(true_var, Variable):
        try:
            return select_input(inputs, mask)
        except Exception as e:
            raise RuntimeError(
                f"Exceptions thrown while doing select_input on {name}:\n{e}"
            )

    elif isinstance(false_var, support_ret_buildin_type) and isinstance(
        false_var, type(true_var)
    ):
        if false_var == true_var:
            return false_var
        else:
            inputs = [
                to_static_variable(false_var),
                to_static_variable(true_var),
            ]
    # Deal with the situations like this: false_var is int and true_var is Variable
    elif (
        isinstance(false_var, support_ret_buildin_type)
        and isinstance(true_var, Variable)
    ) or (
        isinstance(true_var, support_ret_buildin_type)
        and isinstance(false_var, Variable)
    ):
        inputs = [to_static_variable(false_var), to_static_variable(true_var)]
        warnings.warn(
            "Return results from different branches in cond are not same type: "
210
            "false_var returned by false_fn is '{}' and true_var of true_fn is "
            "'{}'".format(type(false_var), type(true_var))
        )
    elif (
        isinstance(false_var, UndefinedVar)
        and isinstance(true_var, (Variable,) + support_ret_buildin_type)
    ) or (
        isinstance(true_var, UndefinedVar)
        and isinstance(false_var, (Variable,) + support_ret_buildin_type)
    ):

        def create_var_if_not_undefined_var(a):
            if isinstance(a, UndefinedVar):
                return a
            return to_static_variable(a)

        true_var, false_var = to_static_variable(true_var), to_static_variable(
            false_var
        )
        inputs = [false_var, true_var]
    else:
        raise TypeError(
            "Unsupported return type of true_fn and false_fn in cond: false_var "
            "returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
                type(false_var), type(true_var)
            )
        )
    try:
        return select_input(inputs, mask)
    except Exception as e:
        raise RuntimeError(
            f"Exceptions thrown while doing select_input on {name}:\n{e}"
        )


def split_lod_tensor(input, mask, level=0):
    """
    This function takes in an input that contains the complete lod information,
    and takes in a mask which is used to mask certain parts of the input.
    The output is the true branch and the false branch with the mask applied to
    the input at a certain level in the tensor. Mainly used in IfElse to split
    data into two parts.

    Args:
        input(Variable|tuple|list|None): The input tensor that contains complete
                                lod information needed to construct the output.
        mask(Variable|list): A bool column vector which masks the input.
        level(int): The specific lod level to split.

    Returns:
        tuple(Variable, Variable):
        The true branch of tensor as per the mask applied to input.

        The false branch of tensor as per the mask applied to input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(name='x', shape=[1])
          x.persistable = True

          y = fluid.layers.data(name='y', shape=[1])
          y.persistable = True

          level = 0
          out_true, out_false = fluid.layers.split_lod_tensor(
                input=x, mask=y, level=level)

    """
    check_type(
        input,
        'input',
        (Variable, list, tuple, type(None)),
        'fluid.layers.split_lod_tensor',
    )
    check_type(mask, 'mask', (Variable, list), 'fluid.layers.split_lod_tensor')
    check_type(level, 'level', int, 'fluid.layers.split_lod_tensor')
    helper = LayerHelper('split_lod_tensor', **locals())
    out_true = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_false = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='split_lod_tensor',
        inputs={
            'X': input,
            'Mask': mask,
        },
        outputs={'OutTrue': out_true, 'OutFalse': out_false},
        attrs={'level': level},
    )
    return out_true, out_false


def merge_lod_tensor(in_true, in_false, x, mask, level=0):
    """
    **merge_lod_tensor**

    This function takes in an input :math:`x`, the True branch, the False
    branch and a binary :math:`mask`. Using this information, this function
    merges the True and False branches of the tensor into a single tensor as
    output at a certain lod level indicated by :math:`level`. Used in IfElse
    to merge the output of the True block and the False block.

    Args:
        in_true(Variable|tuple|list|None): The True branch to be merged.
        in_false(Variable|tuple|list|None): The False branch to be merged.
        x(Variable|tuple|list|None): The input tensor that contains complete
                            lod information needed to construct the output.
        mask(Variable|list): A bool column vector which masks the input.
        level(int): The specific lod level to merge.

    Returns:
        Variable: The merged output tensor.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(
                      name='x', shape=[1], dtype='float32', stop_gradient=False)
          y = fluid.layers.data(
                name='y', shape=[1], dtype='bool', stop_gradient=False)

          level = 0

          out_true, out_false = fluid.layers.split_lod_tensor(
                input=x, mask=y, level=level)
          out = fluid.layers.merge_lod_tensor(
                in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
    """
    helper = LayerHelper('merge_lod_tensor', **locals())
    check_type(
        x,
        'x',
        (Variable, list, tuple, type(None)),
        'fluid.layers.merge_lod_tensor',
    )
    check_type(mask, 'mask', (Variable, list), 'fluid.layers.merge_lod_tensor')
    check_type(
        in_true,
        'in_true',
        (Variable, list, tuple, type(None)),
        'fluid.layers.merge_lod_tensor',
    )
    check_type(
        in_false,
        'in_false',
        (Variable, list, tuple, type(None)),
        'fluid.layers.merge_lod_tensor',
    )
    out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
    helper.append_op(
        type='merge_lod_tensor',
        inputs={'X': x, 'Mask': mask, 'InTrue': in_true, 'InFalse': in_false},
        outputs={'Out': out},
        attrs={'level': level},
    )
    return out


@static_only
def Print(
    input,
    first_n=-1,
    message=None,
    summarize=20,
    print_tensor_name=True,
    print_tensor_type=True,
    print_tensor_shape=True,
    print_tensor_layout=True,
    print_tensor_lod=True,
    print_phase='both',
):
    '''
    :api_attr: Static Graph

    **Print operator**

    This creates a print op that will print when a tensor is accessed.

    Wraps the tensor passed in so that whenever the tensor is accessed,
    the message `message` is printed, along with the current value of the
    tensor.

    Args:
        input (Variable): A Tensor to print.
        summarize (int): Number of elements in the tensor to be printed. If its
                value is -1, then all elements in the tensor will be printed.
        message (str): A string message to print as a prefix.
        first_n (int): Only log `first_n` number of times.
        print_tensor_name (bool, optional): Print the tensor name. Default: True.
        print_tensor_type (bool, optional): Print the tensor type. Default: True.
        print_tensor_shape (bool, optional): Print the tensor shape. Default: True.
        print_tensor_layout (bool, optional): Print the tensor layout. Default: True.
        print_tensor_lod (bool, optional): Print the tensor lod. Default: True.
        print_phase (str): Which phase to display, including 'forward',
                'backward' and 'both'. Default: 'both'. If set to 'backward', will
                only print the gradients of input tensor; If set to 'both', will
                both print the input tensor itself and the gradients of input tensor.

    Returns:
        Variable: Output tensor.

    NOTES:
        The input and output are two different variables. In the following
        process you should use the output variable rather than the input;
        otherwise, the print layer will not have a backward pass.

    Examples:
        .. code-block:: python

           import paddle

           paddle.enable_static()

           x = paddle.full(shape=[2, 3], fill_value=3, dtype='int64')
           out = paddle.static.Print(x, message="The content of input layer:")

           main_program = paddle.static.default_main_program()
           exe = paddle.static.Executor(place=paddle.CPUPlace())
           res = exe.run(main_program, fetch_list=[out])
           # Variable: fill_constant_1.tmp_0
           #   - message: The content of input layer:
           #   - lod: {}
           #   - place: CPUPlace
           #   - shape: [2, 3]
           #   - layout: NCHW
           #   - dtype: long
           #   - data: [3 3 3 3 3 3]
    '''
    check_variable_and_dtype(
        input,
        'input',
        ['float32', 'float64', 'int32', 'int64', 'bool'],
        'fluid.layers.Print',
    )

    helper = LayerHelper('print' + "_" + input.name, **locals())
    output = helper.create_variable_for_type_inference(input.dtype)
    helper.append_op(
        type='print',
        inputs={'In': input},
        outputs={'Out': output},
        attrs={
            'first_n': first_n,
            'summarize': summarize,
            'message': message or "",
            'print_tensor_name': print_tensor_name,
            'print_tensor_type': print_tensor_type,
            'print_tensor_shape': print_tensor_shape,
            'print_tensor_layout': print_tensor_layout,
            'print_tensor_lod': print_tensor_lod,
            'print_phase': print_phase.upper(),
        },
    )
    return output


def Assert(cond, data=None, summarize=20, name=None):
    '''
    This API creates an op that asserts the given condition is true. If the
    condition is false, prints the tensors in data. ``summarize`` specifies the
    number of the elements in the tensors to print.

    Args:
        cond (Variable): The boolean condition tensor whose numel should be 1.
        data (list|tuple, optional): list or tuple of tensors to print when
            condition is not true. If it's ``None``, no tensor will be printed.
            The default value is ``None``.
        summarize (int, optional): Number of elements in the tensor to be
            printed. If its value is -1, then all elements in the tensor will
            be printed. The default value is 20.
        name (str, optional): The default value is ``None`` . Normally users
            don't have to set this parameter. For more information, please
            refer to :ref:`api_guide_Name` .

    Returns:
        Operator: the created operation.

    Raises:
        TypeError: If ``cond`` is not a boolean Variable.
        TypeError: If ``data`` is not a list or tuple or ``None``.
        TypeError: If ``summarize`` is not int.
        TypeError: If ``name`` is not a string or ``None`` .
        fluid.core.EnforceNotMet: If the condition is False in running time.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
            condition = layers.reduce_max(x) < 1.0 # False
            layers.Assert(condition, [x], 10, "example_assert_layer")

            exe = fluid.Executor()
            try:
                exe.run(fluid.default_main_program())
                # Prints x and throws paddle.fluid.core.EnforceNotMet exception
                # Example printed message for x:
                #
                # Variable: fill_constant_0.tmp_0
                #   - lod: {}
                #   - place: CPUPlace()
                #   - shape: [2, 3]
                #   - layout: NCHW
                #   - dtype: float
                #   - data: [2 2 2 2 2 2]
            except fluid.core.EnforceNotMet as e:
                print("Assert Exception Example")

    '''
    check_variable_and_dtype(cond, "cond", ["bool"], "fluid.layers.Assert")
    check_type(data, "data", (list, tuple, type(None)), "fluid.layers.Assert")
    check_type(summarize, "summarize", int, "fluid.layers.Assert")
    check_type(name, "name", (str, type(None)), "fluid.layers.Assert")

    layer_name = name if name else ('assert_' + cond.name)
    helper = LayerHelper(layer_name, **locals())

    op = helper.append_op(
        type="assert",
        inputs={"Cond": cond, "Data": [] if data is None else list(data)},
        attrs={"summarize": summarize},
    )

    return op


class BlockGuard:
    """
    BlockGuard class.

    BlockGuard class is used to create a sub-block in a program by
    using the Python `with` keyword.
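
    A minimal, illustrative sketch (``BlockGuard`` is an internal helper;
    guards such as ``WhileGuard`` build on it):

    .. code-block:: python

        import paddle.fluid as fluid
        from paddle.fluid.layers.control_flow import BlockGuard

        program = fluid.Program()
        with BlockGuard(program):
            # ops created here would be appended to a new sub-block
            # of ``program``; the guard rolls back on exit
            pass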
    """

    def __init__(self, main_program):
        if not isinstance(main_program, Program):
            raise TypeError("BlockGuard takes a program")
        self.main_program = main_program

    def __enter__(self):
        self.main_program._create_block()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.main_program._rollback()
        if exc_type is not None:
            return False  # re-raise exception
        return True


class BlockGuardWithCompletion(BlockGuard):
    """
    BlockGuardWithCompletion class.

    BlockGuardWithCompletion class is used to create an op with a block in a program.
    """

    def __init__(self, rnn):
        if not isinstance(rnn, StaticRNN):
            raise TypeError("BlockGuardWithCompletion takes a StaticRNN")
        super().__init__(rnn.helper.main_program)
        self.rnn = rnn

    def __enter__(self):
        self.rnn.status = StaticRNN.IN_RNN_BLOCK
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            return False
        self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
        self.rnn._complete_op()
        return super().__exit__(exc_type, exc_val, exc_tb)


class StaticRNNMemoryLink:
    """
    StaticRNNMemoryLink class.

    StaticRNNMemoryLink class is used to create a link between two
    memory cells of a StaticRNN.


    NOTE: This is an internal data structure of a very low-level API.
    Please use StaticRNN instead.

    Args:
        init(Variable): the initial variable for Memory.
        pre_mem(Variable): the memory variable in previous time step.
        mem(Variable): the memory variable in current time step.
    """

    def __init__(self, init, pre_mem, mem=None):
        self.init = init
        self.pre_mem = pre_mem
        self.mem = mem


class StaticRNN:
    """
    :api_attr: Static Graph

    StaticRNN class.

    The StaticRNN can process a batch of sequence data. The first dimension of inputs
    represents sequence length, and the length of each input sequence must be equal.
    StaticRNN will unfold the sequence into time steps, and users need to define
    how to process each time step inside the :code:`with` block.

    Args:
        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            vocab_size, hidden_size=10000, 200
            paddle.enable_static()
            x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
            # create word sequence
            x_emb = layers.embedding(
                input=x,
                size=[vocab_size, hidden_size],
                dtype='float32',
                is_sparse=False)
            # transform batch size to dim 1
            x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

            rnn = fluid.layers.StaticRNN()
            with rnn.step():
                # mark created x_emb as input, each step process a word
                word = rnn.step_input(x_emb)
                # create prev memory parameter, batch size comes from word
                prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                # use hidden to update prev
                rnn.update_memory(prev, hidden)
                # mark hidden as output
                rnn.step_output(hidden)
            # get StaticRNN final output
            result = rnn()

    """

    BEFORE_RNN_BLOCK = 0
    IN_RNN_BLOCK = 1
    AFTER_RNN_BLOCK = 2

    def __init__(self, name=None):
        check_type(name, "name", (str, type(None)), "fluid.layers.StaticRNN")
        self.helper = LayerHelper("static_rnn", name=name)
        self.memories = {}  # memory map, from pre_mem.name --> MemoryLink
        self.inputs = []  # input variable list in current block
        self.outputs = []  # output variable list in parent block
        self.status = StaticRNN.BEFORE_RNN_BLOCK  # status flag.
        # sequence length; since it is a static RNN, the sequence length is fixed.
        self.seq_len = None

    def step(self):
        """
        Define operators in each step. step is used in a :code:`with` block, and the OPs
        in the :code:`with` block will be executed sequence_len times (sequence_len is
        the length of the input).
        """
        return BlockGuardWithCompletion(self)

    def _assert_in_rnn_block_(self, method):
        if self.status != StaticRNN.IN_RNN_BLOCK:
            raise ValueError("You must invoke {0} in rnn block".format(method))

    def memory(
        self,
        init=None,
        shape=None,
        batch_ref=None,
        init_value=0.0,
        init_batch_dim_idx=0,
        ref_batch_dim_idx=1,
    ):
        """
        Create a memory variable for static rnn.
        If the :code:`init` is not None, :code:`memory` will be initialized by
        this Variable. If :code:`init` is None, :code:`shape` and :code:`batch_ref`
        must be set, and this function will create a new variable with that shape
        and batch_ref to initialize the memory.

        Args:
            init(Variable, optional): Tensor used to init memory. If it is not set,
                :code:`shape` and :code:`batch_ref` must be provided.
                Default: None.
            shape(list|tuple): When :code:`init` is None, use this arg to initialize the
                memory shape. NOTE: the shape does not contain batch_size. Default: None.
            batch_ref(Variable, optional): When :code:`init` is None, the memory's batch size
                will be set as batch_ref's ref_batch_dim_idx value. Default: None.
            init_value(float, optional): When :code:`init` is None, used to init the memory's
                value. Default: 0.0.
            init_batch_dim_idx(int, optional): the batch_size axis of the :code:`init` Variable. Default: 0.
            ref_batch_dim_idx(int, optional): the batch_size axis of the :code:`batch_ref` Variable. Default: 1.

        Returns:
            Variable: The memory variable.

        Examples 1:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)


        Examples 2:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers
                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])
                boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1)
                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # init memory
                        prev = rnn.memory(init=boot_memory)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # update hidden with prev
                        rnn.update_memory(prev, hidden)

        """
        self._assert_in_rnn_block_('memory')
        check_type(
            init,
            "init",
            (Variable, type(None)),
            "fluid.layers.StaticRNN.memory",
        )
        check_type(
            shape,
            "shape",
            (list, tuple, type(None)),
            "fluid.layers.StaticRNN.memory",
        )
        check_type(
            batch_ref,
            "batch_ref",
            (Variable, type(None)),
            "fluid.layers.StaticRNN.memory",
        )
        if init is None:
            if shape is None or batch_ref is None:
                raise ValueError(
                    "if init is None, memory at least needs shape and batch_ref"
                )
            parent_block = self._parent_block()
            var_name = unique_name.generate_with_ignorable_key(
                "@".join([self.helper.name, "memory_boot"])
            )
            boot_var = parent_block.create_var(
                name=var_name,
                shape=shape,
                dtype=batch_ref.dtype,
                persistable=False,
            )

            parent_block.append_op(
                type="fill_constant_batch_size_like",
                inputs={'Input': [batch_ref]},
                outputs={'Out': [boot_var]},
                attrs={
                    'value': init_value,
                    'shape': boot_var.shape,
                    'dtype': boot_var.dtype,
                    'input_dim_idx': ref_batch_dim_idx,
                    'output_dim_idx': init_batch_dim_idx,
                },
            )

            return self.memory(init=boot_var)
        else:
            pre_mem = self.helper.create_variable(
                name=unique_name.generate_with_ignorable_key(
                    "@".join([self.helper.name, "mem"])
                ),
                dtype=init.dtype,
                shape=init.shape,
            )
            self.memories[pre_mem.name] = StaticRNNMemoryLink(
                init=init, pre_mem=pre_mem
            )
            return pre_mem

    def step_input(self, x):
        """
        Mark a sequence as a StaticRNN input.

        Args:
            x(Variable): The input sequence, the shape of x
                should be [seq_len, ...].

        Returns:
            Variable: The current time step data in the input sequence.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)

        """
        self._assert_in_rnn_block_('step_input')
        check_type(x, "x", Variable, "fluid.layers.StaticRNN.step_input")
        if self.seq_len is None:
            self.seq_len = x.shape[0]
        elif x.shape[0] != -1 and self.seq_len != x.shape[0]:
            raise ValueError("Static RNN only takes fixed seq_len input")

        ipt = self.helper.create_variable(
            name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type
        )
        self.inputs.append(ipt)
        return ipt

    def step_output(self, o):
        """
        Mark a sequence as a StaticRNN output.

        Args:
            o(Variable): The output sequence.

        Returns:
            None.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)
                        rnn.step_output(hidden)

                result = rnn()

        """
        self._assert_in_rnn_block_('step_output')
        check_type(o, "o", Variable, "fluid.layers.StaticRNN.step_output")

        tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
        self.helper.append_op(
            type='rnn_memory_helper',
            inputs={'X': [o]},
            outputs={'Out': tmp_o},
            attrs={'dtype': o.dtype},
        )

        out_var = self._parent_block().create_var(
            name=tmp_o.name,
            shape=[self.seq_len] + list(tmp_o.shape),
            dtype=tmp_o.dtype,
        )

        self.outputs.append(out_var)

    def output(self, *outputs):
        """
        Mark the StaticRNN output variables.

        Args:
            outputs: The output Tensors; multiple variables can be marked as outputs.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)
                        # mark each step's hidden and word as output
                        rnn.output(hidden, word)

                result = rnn()
        """
        for each in outputs:
            self.step_output(each)

    def update_memory(self, mem, var):
        """
        Update the memory from :code:`mem` to :code:`var`.

        Args:
            mem(Variable): the memory variable.
            var(Variable): the plain variable generated in RNN block, used to update memory.
                           var and mem should have the same dims and data type.

        Returns:
            None

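        Examples:
            A minimal, illustrative sketch (see the examples of
            :code:`StaticRNN.memory` for a complete runnable program; the
            names below come from those examples):

            .. code-block:: python

                # inside `with rnn.step():`
                prev = rnn.memory(shape=[-1, hidden_size], batch_ref=word)
                hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                # point the memory at its value for the next time step
                rnn.update_memory(prev, hidden)
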
        """
        check_type(mem, "mem", Variable, "fluid.layers.StaticRNN.update_memory")
        check_type(var, "var", Variable, "fluid.layers.StaticRNN.update_memory")
        self.memories[mem.name].mem = var

    def _parent_block(self):
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block

    def __call__(self, *args, **kwargs):
        if self.status != StaticRNN.AFTER_RNN_BLOCK:
            raise ValueError("RNN output can only be retrieved after rnn block")
        if len(self.outputs) == 0:
            raise ValueError("RNN has no output")
        elif len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs

    def _complete_op(self):
        main_program = self.helper.main_program
        rnn_block = main_program.current_block()
        parent_block = self._parent_block()

        local_inputs = set()

        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    local_inputs.add(out_var_name)

        for var in self.inputs:
            local_inputs.add(var.name)
        for m in self.memories:
            local_inputs.add(m)

        # NOTE(zcd): the params have two categories of variables.
        #   - the variables that are the outputs of StaticRNN.
        #   - the variables that are the parameters of some layers, for example, conv2d.
        params = list()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in local_inputs:
                        params.append(in_var_name)

        parameters = [
            parent_block._find_var_recursive(name) for name in set(params)
        ]

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES
        )

        inlinks = [parent_block.var(i.name) for i in self.inputs]
        outlinks = self.outputs

        # NOTE(zcd): the states may be empty in some cases.
        boot_memories = []
        pre_memories = []
        memories = []
        for _, mem in self.memories.items():
            boot_memories.append(mem.init)
            pre_memories.append(mem.pre_mem.name)
            assert (
                mem.mem is not None
            ), "%s should be updated in every step." % (mem.init.name)
            mem_var = rnn_block.var(mem.mem.name)
            assert isinstance(mem_var, Variable)
            new_mem = self.helper.create_variable_for_type_inference(
                dtype=mem_var.dtype
            )
            rnn_block.append_op(
                type='rnn_memory_helper',
                inputs={'X': [mem_var]},
                outputs={'Out': [new_mem]},
                attrs={'dtype': mem_var.dtype},
            )

            memories.append(new_mem.name)

        parent_block.append_op(
            type='recurrent',
            inputs={
                'inputs': inlinks,
                'initial_states': boot_memories,
                'parameters': parameters,
            },
            outputs={'outputs': outlinks, 'step_scopes': [step_scope]},
            attrs={
                'has_states': len(pre_memories) > 0,
                'ex_states': pre_memories,
                'states': memories,
                'sub_block': rnn_block,
            },
        )


class WhileGuard(BlockGuard):
    def __init__(self, while_op):
        if not isinstance(while_op, While):
            raise TypeError("WhileGuard takes a while op")
        super().__init__(while_op.helper.main_program)
        self.while_op = while_op

    def __enter__(self):
        self.while_op.status = While.IN_WHILE_BLOCK
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            return False
        self.while_op.status = While.AFTER_WHILE_BLOCK
        self.while_op._complete()
        return super().__exit__(exc_type, exc_val, exc_tb)


def get_inputs_outputs_in_block(
    current_block, inner_inputs, inner_outputs, helper
):
    """
    Find inputs and outputs in current control flow block.
    :param current_block: Current control flow block.
    :param inner_inputs: Input var name of ops in current block.
    :param inner_outputs: Output var name of ops in current block.
    :return: inner_inputs, inner_outputs
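
    For example, if an op inside the block reads a variable created outside
    the block, that name is collected into inner_inputs; every name written
    by an op in the block is collected into inner_outputs.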
    """

    def is_ignore_vars(op, var_name):
        # NOTE(dev): There are some persistable vars created in some non-standard APIs
        # such as "contrib.layers.shuffle_batch". It creates a "Seed" used both in
        # Input and Output. This var shall not be considered as a loop_var in
        # control_flow.
        IGNORE_VAR_NAMES = {"shuffle_batch": ["shuffle_batch_seed"]}
        if op.type in IGNORE_VAR_NAMES:
            var_names = IGNORE_VAR_NAMES[op.type]
            for name in var_names:
                if name in var_name:
                    return True
        return False

    # Step1: update inner_inputs and inner_outputs
    # NOTE: This assumes that all variables are inputs or outputs of Ops,
    # but some variables are created without appending a real op.
    # For example, in `arr = create_array(dtype)`, `arr` is not an output of an op.
    for op in current_block.ops:
        assert isinstance(op, Operator)
        for iname in op.input_names:
            for in_var_name in op.input(iname):
                if in_var_name not in inner_outputs and not is_ignore_vars(
                    op, in_var_name
                ):
                    inner_inputs.add(in_var_name)

        for oname in op.output_names:
            for out_var_name in op.output(oname):
                inner_outputs.add(out_var_name)

    # Step2: Remove LOD_TENSOR_ARRAY created in current control flow block.
    remove_inner_inputs = set()
    parent_block = helper.main_program.block(current_block.parent_idx)

    for in_var_name in inner_inputs:
        parent_block_var = parent_block._find_var_recursive(in_var_name)
        current_block_var = None
        if current_block.has_var(in_var_name):
            current_block_var = current_block.var(in_var_name)
1182 1183 1184 1185 1186
        if (
            not parent_block_var
            and current_block_var
            and current_block_var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
1187 1188 1189 1190 1191 1192 1193
            remove_inner_inputs.add(in_var_name)

    inner_inputs = inner_inputs - remove_inner_inputs

    return inner_inputs, inner_outputs


class While:
    """
    :api_attr: Static Graph

    while loop control flow. Repeat while body until cond is False.

    Note:
        A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` .

    Notice:
        Local variables created in ``While`` are similar to those created in a C++ while loop, and cannot be referenced externally.
        As a result, they cannot be obtained through ``fetch_list`` of ``Executor``. If you would like to access a variable
        outside of ``while``, PaddlePaddle provides the ``assign`` API to copy local variables to external ones. Please refer
        to example code 2 or to `issue#22724 <https://github.com/PaddlePaddle/Paddle/issues/22724>`_.

    Args:
        cond(Variable): A Tensor whose data type is bool controlling whether to continue looping.
        is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

    Examples 1:
          .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)           # loop counter

            loop_len = fluid.layers.fill_constant(shape=[1],dtype='int64', value=10)    # loop length

            cond = fluid.layers.less_than(x=i, y=loop_len)
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                i = fluid.layers.increment(x=i, value=1, in_place=True)
                fluid.layers.less_than(x=i, y=loop_len, cond=cond)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[i])
            print(res) # [array([10])]


    Examples 2:
          .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1)
            data = fluid.data(name='data', shape=[1], dtype='float32')
            sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0)  # Define the variable to be obtained outside of While, whose name should be different from the variable inside the While

            cond = fluid.layers.less_than(x=i, y=loop_len)
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                sums_tensor = fluid.layers.elementwise_add(x=data, y=data)
                fluid.layers.assign(sums_tensor, sums)  # Update the value of sums_tensor defined in While to the sums variable defined outside of While through fluid.layers.assign
                i = fluid.layers.increment(x=i, value=1, in_place=True)
                data = fluid.layers.elementwise_add(x=data, y=one)
                fluid.layers.less_than(x=i, y=loop_len, cond=cond)

            feed_data = np.ones(1).astype('float32')
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res = exe.run(fluid.default_main_program(), feed={'data': feed_data}, fetch_list=sums)
            print(res[0])  # [2.]    # Because the data in While does not update the value outside the While, the value of sums is [2.] after the loop
    """

    BEFORE_WHILE_BLOCK = 0
    IN_WHILE_BLOCK = 1
    AFTER_WHILE_BLOCK = 2

    def __init__(self, cond, is_test=False, name=None):
        self.helper = LayerHelper("while", name=name)
        self.status = While.BEFORE_WHILE_BLOCK
        check_variable_and_dtype(cond, 'cond', ['bool'], 'fluid.layers.While')
        if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
            raise TypeError(
                "condition expected shape as [1], but given shape as {0}.".format(
                    list(cond.shape)
                )
            )
        self.cond_var = cond
        self.is_test = is_test

    def block(self):
        return WhileGuard(self)

    def _complete(self):
        main_program = self.helper.main_program
        while_block = main_program.current_block()
        parent_block = main_program.block(
            main_program.current_block().parent_idx
        )

        inner_outputs = {self.cond_var.name}
        x_name_list = set()
        x_name_list, inner_outputs = get_inputs_outputs_in_block(
            while_block, x_name_list, inner_outputs, self.helper
        )

        out_vars = []
        for inner_out_name in inner_outputs:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_vars.append(inner_var)

        x_name_list |= set(map(lambda x: x.name, out_vars))
        # NOTE(dev): cond_var has been contained in Input('Condition'), so
        # we remove it from Input('X')
        x_name_list -= {self.cond_var.name}

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES
        )

        parent_block.append_op(
            type='while',
            inputs={
                'X': [
                    parent_block._var_recursive(x_name)
                    for x_name in x_name_list
                ],
                'Condition': [self.cond_var],
            },
            outputs={'Out': out_vars, 'StepScopes': [step_scope]},
            attrs={'sub_block': while_block, "is_test": self.is_test},
        )


support_ret_buildin_type = (bool, float, int)


def assign_skip_lod_tensor_array(input, output):
    """
    Assign input to output, but skip the process of copying LoDTensorArray unless it's created in while_block.
    """

    def has_shape_diff(x_var, y_var):
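        # Illustrative examples: [2, 3] vs [2, -1] -> False (a -1 dim matches
        # any dim); [2, 3] vs [2, 4] -> True; [2, 3] vs [2, 3, 1] -> True
        # (the ranks differ).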
        if len(x_var.shape) != len(y_var.shape):
            return True
        for x_dim, y_dim in zip(x_var.shape, y_var.shape):
            if x_dim != y_dim and -1 not in [x_dim, y_dim]:
                return True
1342 1343
        return False

1344
    if not isinstance(input, (Variable, core.VarBase)):
1345
        if isinstance(output, Variable) and isinstance(
1346 1347
            input, support_ret_buildin_type
        ):
1348 1349 1350
            assign(input, output)
        else:
            output = input
1351 1352
        return

1353 1354
    if input.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        main_program = input.block.program
1355
        parent_block = main_program.block(
1356 1357
            main_program.current_block().parent_idx
        )
1358 1359 1360
        if parent_block and not parent_block._find_var_recursive(input.name):
            assign(input, output)
    else:
1361 1362 1363 1364 1365
        if (
            isinstance(output, Variable)
            and isinstance(input, Variable)
            and has_shape_diff(input, output)
        ):
1366
            warnings.warn(
1367 1368 1369 1370
                "In dy2static mode, we attemp to assign a variable with shape {} into a variable with shape{}, which is not always right.".format(
                    input.shape, output.shape
                )
            )
1371
        assign(input, output)
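
# A minimal sketch of `assign_skip_lod_tensor_array` semantics with two
# hypothetical static-graph variables (illustrative only):
#
#     src = paddle.static.data(name='src', shape=[2, -1], dtype='float32')
#     dst = paddle.static.data(name='dst', shape=[2, 8], dtype='float32')
#     assign_skip_lod_tensor_array(src, dst)
#     # -1 is treated as a wildcard by has_shape_diff, so no warning is
#     # emitted and the call falls through to assign(src, dst).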


def while_loop(cond, body, loop_vars, is_test=False, name=None):
    """
    :api_attr: Static Graph

    while_loop is one of the control flow APIs. It repeats ``body`` until ``cond`` returns False.

    Notice:
        Local variables defined in ``body`` cannot be obtained through ``fetch_list`` of ``Executor``. Variables should
        be defined outside ``body`` and placed in ``loop_vars`` for looping; these variables can then be fetched by ``fetch_list``.

    Args:
        cond(Callable): A callable returning a boolean tensor controlling whether to continue looping. And ``cond`` takes
            as many arguments as ``loop_vars`` .
        body(Callable): A callable returning a tuple or list of tensors or LoDTensorArrays of the same arity
            (length and structure) and types as ``loop_vars`` . And ``body`` takes as many arguments as ``loop_vars`` .
        loop_vars(list|tuple): A list or tuple of tensors or LoDTensorArrays that is passed to both ``cond`` and ``body`` .
        is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
        name(str, optional): Normally there is no need for users to set this property. For more information, please
            refer to :ref:`api_guide_Name`. Default is None.

    Returns:
        A list or tuple of Tensors or LoDTensorArrays returned by ``body`` .

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()

            def cond(i, ten):
                return i < ten

            def body(i, ten):
                i = i + 1
                return [i, ten]

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()
            with paddle.static.program_guard(main_program, startup_program):
                i = paddle.full(shape=[1], fill_value=0, dtype='int64')     # loop counter
                ten = paddle.full(shape=[1], fill_value=10, dtype='int64')  # loop length
                i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])

                exe = paddle.static.Executor(paddle.CPUPlace())
                res = exe.run(main_program, feed={}, fetch_list=[i])
                print(res) # [array([10])]
    """
    helper = LayerHelper('while_loop', **locals())

    if not callable(cond):
        raise TypeError("cond in while_loop should be callable")
    if not callable(body):
        raise TypeError("body in while_loop should be callable")
    check_type(loop_vars, 'loop_vars', (list, tuple), 'fluid.layers.while_loop')
    if len(loop_vars) == 0:
        raise ValueError("loop_vars in while_loop should not be empty")

    pre_cond = cond(*loop_vars)
    check_variable_and_dtype(
        pre_cond, 'var of cond returned', ['bool'], 'fluid.layers.while_loop'
    )
    if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1:
        raise TypeError(
            "the shape of the variable returned by cond should be [1],"
            "but given shape as {0}.".format(list(pre_cond.shape))
        )

    if _non_static_mode():
        now_cond = pre_cond.numpy()[0]
        while now_cond:
            output_vars = body(*loop_vars)
            if not isinstance(output_vars, (list, tuple)):
                output_vars = [output_vars]
            if len(output_vars) != len(loop_vars):
                raise ValueError(
                    "body in while_loop should return the same arity "
                    "(length and structure) and types as loop_vars"
                )
            now_cond = cond(*output_vars).numpy()[0]
            map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
        return loop_vars

    while_loop_block = While(pre_cond, is_test, name)
    has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
    with while_loop_block.block():
        # If a variable with mutable type is included in loop_vars, like `dict/list`,
        # modifying it in the body function will cause origin variable to be modified
        # synchronously. This will raise an assignment error out of while block.
        # Here we make a copy of the mutable vars to avoid this problem.
        if has_mutable_vars_in_loop:
            new_loop_vars = copy_mutable_vars(loop_vars)
            output_vars = body(*new_loop_vars)
        else:
            output_vars = body(*loop_vars)
        if not isinstance(output_vars, (list, tuple)):
            output_vars = [output_vars]
        try:
            loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
            assert_same_structure(output_vars, loop_vars, check_types=False)
        except ValueError as e:
            raise ValueError(
                "body in while_loop should return the same arity "
                "(length and structure) as loop_vars: {0}".format(e)
            )
        now_cond = cond(*output_vars)
        map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
        assign(now_cond, pre_cond)
    return loop_vars
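
# A minimal sketch (hypothetical shapes and values) of while_loop with a
# list-typed loop variable. In static-graph mode, mutable loop_vars are
# copied via copy_mutable_vars before body() runs, so mutating `pair`
# inside body is safe:
#
#     def cond(i, limit, pair):
#         return i < limit
#
#     def body(i, limit, pair):
#         pair[0] = pair[0] + 1
#         return [i + 1, limit, pair]
#
#     i = paddle.full(shape=[1], fill_value=0, dtype='int64')
#     limit = paddle.full(shape=[1], fill_value=5, dtype='int64')
#     pair = [paddle.full(shape=[1], fill_value=0, dtype='float32'),
#             paddle.full(shape=[1], fill_value=1, dtype='float32')]
#     i, limit, pair = paddle.static.nn.while_loop(cond, body, [i, limit, pair])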


def _deal_with_undefined_var(output_vars, loop_vars):
    """Deal with undefined var cases: we create undefined variables based on the results of body().
    In Dy2Static, we use an undefined var to represent a var created inside control flow. This function
    expands the loop_vars and replaces the original loop_vars.
    1. UndefinedVar = Variable      # create a variable
    2. UndefinedVar = None          # create an undefined var with RETURN_NO_VALUE_MAGIC_NUM
    3. UndefinedVar = List(int)     # create a list of variables
    4. UndefinedVar = value         # create a variable
    """
    from paddle.fluid.dygraph.dygraph_to_static.utils import (
        UndefinedVar,
        create_undefined_variable,
    )

    def create_var_like(o_var):
        if (
            isinstance(o_var, (Variable,) + support_ret_buildin_type)
            or o_var is None
        ):
            return create_undefined_variable()
        if is_sequence(o_var):
            """
            Create a complex container class inside the body of while, including Python list and Python dict.
            """
            return map_structure(lambda x: create_undefined_variable(), o_var)

    if len(output_vars) != len(loop_vars):
        raise ValueError(
            "output_vars and loop_vars should have the same length."
        )

    results = []
    for o_var, l_var in zip(output_vars, loop_vars):
        if isinstance(l_var, UndefinedVar) or l_var is None:
            results.append(create_var_like(o_var))
        else:
            results.append(l_var)
    return results
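
# Illustrative sketch (hypothetical values): slots of loop_vars that dy2static
# marked as UndefinedVar or None are replaced by freshly created undefined
# variables whose structure mirrors the corresponding body() output:
#
#     results = _deal_with_undefined_var(
#         output_vars=[var_a, None, [v1, v2]],
#         loop_vars=[undefined_slot, None, [v1, v2]],
#     )
#     # results[0] and results[1] become new undefined variables; results[2]
#     # keeps the original list because its slot was already defined.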


def lod_rank_table(x, level=0):
    """
    LoD Rank Table Operator. Given an input variable **x** and a level number
    of LoD, this layer creates a LoDRankTable object. A LoDRankTable object
    contains a list of bi-element tuples. Each tuple consists of an index and
    a length, both of which are int type. Referring to the specified level of LoD,
    the index is the sequence index number and the length represents the
    sequence length. Please note that the list is ranked in descending order by
    the length. The following is an example:

        .. code-block:: text

            x is a LoDTensor:
                x.lod = [[2,                1],
                         [5,             1, 1]]
                x.data = [a, b, c, d, e, f, g]

            1. set level to 0:
                Create lod rank table:
                    lod_rank_table_obj = lod_rank_table(x, level=0)

                Get:
                    lod_rank_table_obj.items() = [(0, 2), (1, 1)]

            2. set level to 1:
                Create lod rank table:
                    lod_rank_table_obj = lod_rank_table(x, level=1)

                Get:
                    lod_rank_table_obj.items() = [(0, 5), (1, 1), (2, 1)]

    Args:
        x (Variable): Input variable, a LoDTensor based on which to create the lod
            rank table.
        level (int): Specify the LoD level, on which to create the lod rank
            table.

    Returns:
        Variable: The created LoDRankTable object.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[10],
                                  dtype='float32', lod_level=1)
            out = fluid.layers.lod_rank_table(x=x, level=0)
    """
    check_type(x, 'x', (Variable, list), 'lod_rank_table')
    if isinstance(x, (list)):
        for i, input_x in enumerate(x):
            check_type(
                input_x, 'input[' + str(i) + ']', Variable, 'lod_rank_table'
            )

    helper = LayerHelper("lod_rank_table", **locals())
    table = helper.create_variable(
        type=core.VarDesc.VarType.LOD_RANK_TABLE,
        name=unique_name.generate("lod_rank_table"),
    )
    helper.append_op(
        type='lod_rank_table',
        inputs={'X': x},
        outputs={'Out': table},
        attrs={'level': level},
    )
    return table


@templatedoc()
def max_sequence_len(rank_table):
    """
    ${comment}

    >>> import paddle.fluid as fluid
    >>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
    >>>                       lod_level=1)
    >>> rank_table = fluid.layers.lod_rank_table(x=x, level=0)
    >>> max_seq_len = fluid.layers.max_sequence_len(rank_table)

    Args:
        rank_table(${rank_table_type}): ${rank_table_comment}.

    Returns:
        ${out_comment}.
    """
    helper = LayerHelper("max_sequence_len", **locals())
    res = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="max_sequence_len",
        inputs={"RankTable": rank_table},
        outputs={"Out": res},
    )
    return res


def lod_tensor_to_array(x, table):
    """
    Convert a LoDTensor to a LoDTensorArray.

    This function splits a LoDTensor into a LoDTensorArray according to its LoD
    information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
    PaddlePaddle. The generated LoDTensorArray of this function can be further read
    or written by `read_from_array()` and `write_to_array()` operators. However,
    this function is generally an internal component of PaddlePaddle `DynamicRNN`.
    Users should not use it directly.

    Args:
        x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
        table (ParamAttr|list): The variable that stores the level of lod
                                which is ordered by sequence length in
                                descending order. It is generally generated
                                by `fluid.layers.lod_rank_table()` API.

    Returns:
        Variable: The LoDTensorArray that has been converted from the input tensor.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(name='x', shape=[10])
          table = fluid.layers.lod_rank_table(x, level=0)
          array = fluid.layers.lod_tensor_to_array(x, table)
    """
    check_type(x, 'x', (Variable, list), 'lod_tensor_to_array')
    if isinstance(x, (list)):
        for i, input_x in enumerate(x):
            check_type(
                input_x,
                'input[' + str(i) + ']',
                Variable,
                'lod_tensor_to_array',
            )
    check_type(table, 'table', (Variable, list), 'lod_tensor_to_array')
    if isinstance(table, (list)):
        for i, table_x in enumerate(table):
            check_type(
                table_x,
                'table[' + str(i) + ']',
                Variable,
                'lod_tensor_to_array',
            )
    helper = LayerHelper("lod_tensor_to_array", **locals())
    array = helper.create_variable(
        name=unique_name.generate("lod_tensor_to_array"),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=x.dtype,
    )
    helper.append_op(
        type='lod_tensor_to_array',
        inputs={'X': x, 'RankTable': table},
        outputs={'Out': array},
    )
    return array


def array_to_lod_tensor(x, table):
    """Convert a LoDTensorArray to a LoDTensor.

    Args:
        x (Variable|list): The lod tensor array to be converted to a tensor.
        table (ParamAttr|list): The variable that stores the level of lod
                                which is ordered by sequence length in
                                descending order.

    Returns:
        Variable: The variable of type tensor that has been converted
                  from an array.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(name='x', shape=[10])
          table = fluid.layers.lod_rank_table(x, level=0)
          array = fluid.layers.lod_tensor_to_array(x, table)
          lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
    """
    check_type(x, 'x', (Variable, list), 'array_to_lod_tensor')
    if isinstance(x, (list)):
        for i, input_x in enumerate(x):
            check_type(
                input_x,
                'input[' + str(i) + ']',
                Variable,
                'array_to_lod_tensor',
            )
    check_type(table, 'table', (Variable, list), 'array_to_lod_tensor')
    if isinstance(table, (list)):
        for i, table_x in enumerate(table):
            check_type(
                table_x,
                'table[' + str(i) + ']',
                Variable,
                'array_to_lod_tensor',
            )

    helper = LayerHelper("array_to_lod_tensor", **locals())
    tmp = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="array_to_lod_tensor",
        inputs={'X': x, 'RankTable': table},
        outputs={'Out': tmp},
    )
    return tmp


def increment(x, value=1.0, in_place=True):
    """
    The OP is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.
    Notice that the number of elements in :attr:`x` must be equal to 1.

    Parameters:
        x (Variable): A tensor that must always contain only one element; its supported
            data types are float32, float64, int32 and int64.
        value (float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
        in_place (bool, optional): Whether the OP should be performed in-place. Default: True.

    Returns:
        Variable: The elementwise-incremented tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.]
          fluid.layers.increment(counter) # [1.]
    """
    if in_dygraph_mode():
        return _C_ops.increment_(x, value)

    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
    )
    helper = LayerHelper("increment", **locals())
    if not in_place:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = x
    helper.append_op(
        type='increment',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'step': float(value)},
    )
    return out


def array_write(x, i, array=None):
    """
    This OP writes the input ``x`` into the i-th position of the ``array``
    :ref:`api_fluid_LoDTensorArray` and returns the modified array.
    If ``array`` is None, a new LoDTensorArray will be created and returned.
    This OP is often used together with :ref:`api_fluid_layers_array_read` OP.

    Args:
        x (Variable): The input data to be written into array. It's multi-dimensional
            Tensor or LoDTensor. Data type: float32, float64, int32, int64.
        i (Variable): 1-D Tensor with shape [1], which represents the position into which
            ``x`` is written. Data type: int64.
        array (LoDTensorArray, optional): The LoDTensorArray into which ``x`` is written.
            The default value is None, in which case a new LoDTensorArray will be created
            and returned as a result.

    Returns:
        Variable: The input ``array`` after ``x`` is written into.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # Write tmp into the position of arr with subscript 10 and return arr.
            arr = fluid.layers.array_write(tmp, i=i)

            # Now, arr is a LoDTensorArray with length 11. We can use array_read OP to read
            # the data at subscript 10 and print it out.
            item = fluid.layers.array_read(arr, i=i)
            input = fluid.layers.Print(item, message="The content of i-th LoDTensor:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)

            # The printed result is:
            # 1570533133    The content of i-th LoDTensor:  The place is:CPUPlace
            # Tensor[array_read_0.tmp_0]
            #    shape: [3,2,]
            #    dtype: l
            #    data: 5,5,5,5,5,5,

            # The output is a 2-D Tensor with shape [3,2], which is tmp above.
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They all represent 64-bit integer variables.

    """
    if _non_static_mode():
        assert isinstance(
            x, Variable
        ), "The input data 'x' in array_write must be Variable in dygraph mode"
        assert isinstance(
            i, Variable
        ), "The index 'i' in array_write must be Variable in dygraph mode"
        assert i.shape == [
            1
        ], "The shape of index 'i' should be [1] in dygraph mode"
        i = i.numpy().item(0)
        if array is None:
            array = create_array(x.dtype)
        assert isinstance(
            array, list
        ), "The 'array' in array_write must be a list in dygraph mode"
        assert i <= len(
            array
        ), "The index 'i' should not be greater than the length of 'array' in dygraph mode"
        if i < len(array):
            array[i] = x
        else:
            array.append(x)
        return array

    check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
    check_type(x, 'x', (Variable), 'array_write')
    helper = LayerHelper('array_write', **locals())
    if array is not None:
        if (
            not isinstance(array, Variable)
            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            raise TypeError(
                "array should be tensor array variable in array_write Op"
            )
    if array is None:
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype,
        )
    helper.append_op(
        type='write_to_array',
        inputs={'X': [x], 'I': [i]},
        outputs={'Out': [array]},
    )
    return array


def create_array(dtype, initialized_list=None):
    """
    This OP creates an LOD_TENSOR_ARRAY. It is used as
    the input of :ref:`api_fluid_layers_array_read` and
    :ref:`api_fluid_layers_array_write`. Also it can be used
    with :ref:`api_fluid_layers_While` to create an RNN network.

    Args:
        dtype (str): The data type of the elements in the lod_tensor_array.
                     Supported data types: float32, float64, int32, int64.
        initialized_list(list, optional): Used to initialize the created array.
                    All values in the list should be Tensors. Default: None.

    Returns:
        Variable: The empty lod_tensor_array. The data type of elements in Tensor is ``dtype``.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.create_array(dtype='float32') # Create a float32 LoDTensorArray.
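
          # A hedged sketch of `initialized_list` (t0/t1 are created here
          # purely for illustration):
          t0 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0)
          t1 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1)
          arr = fluid.layers.create_array('float32', initialized_list=[t0, t1])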

    """
    array = []
    if initialized_list is not None:
        if not isinstance(initialized_list, (list, tuple)):
            raise TypeError(
                "Require type(initialized_list) should be list/tuple, but received {}".format(
                    type(initialized_list)
                )
            )
        array = list(initialized_list)

    # NOTE: Only support plain list like [x, y,...], not support nested list in static mode.
    for val in array:
        if not isinstance(val, Variable):
            raise TypeError(
                "All values in `initialized_list` should be Variable, but received {}.".format(
                    type(val)
                )
            )

    if _non_static_mode():
        return array

    helper = LayerHelper("array", **locals())
    tensor_array = helper.create_variable(
        name="{0}.out".format(helper.name),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=dtype,
    )

    for val in array:
        array_write(x=val, i=array_length(tensor_array), array=tensor_array)

    return tensor_array

@templatedoc()
def less_than(x, y, force_cpu=None, cond=None, name=None):
    """

    ${comment}

    Args:
        x(Tensor): ${x_comment}.
        y(Tensor): ${y_comment}.
        force_cpu(${force_cpu_type}): ${force_cpu_comment}.
        cond(Tensor, optional): Optional output which can be any created Tensor
            that meets the requirements to store the result of *less_than*.
            if cond is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        ${out_comment}.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
            y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
            result = paddle.less_than(x, y)
            print(result) # [True, False, False, False]

    """
    check_variable_and_dtype(
        x, "x", ["float32", "float64", "int32", "int64"], "less_than"
    )
    check_variable_and_dtype(
        y, "y", ["float32", "float64", "int32", "int64"], "less_than"
    )
    if cond is not None:
        check_type(cond, "cond", Variable, "less_than")
    if force_cpu is not None:
        check_type(force_cpu, "force_cpu", bool, "less_than")

    helper = LayerHelper("less_than", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True

    attrs = dict()
    if force_cpu is not None:
        attrs['force_cpu'] = force_cpu

    helper.append_op(
        type='less_than',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]},
        attrs=attrs,
    )
    return cond


@templatedoc()
def less_equal(x, y, cond=None, name=None):
    """
    :alias_main: paddle.less_equal
        :alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal
        :old_api: paddle.fluid.layers.less_equal

    This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.

    Args:
        x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *less_equal*.
            if cond is None, a new Variable will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          label = fluid.layers.assign(np.array([1, 3], dtype='int32'))
          limit = fluid.layers.assign(np.array([1, 2], dtype='int32'))
          out = fluid.layers.less_equal(x=label, y=limit) #out=[True, False]
          out1 = label <= limit #out1=[True, False]

    """
    check_variable_and_dtype(
        x, "x", ["float32", "float64", "int32", "int64"], "less_equal"
    )
    check_variable_and_dtype(
        y, "y", ["float32", "float64", "int32", "int64"], "less_equal"
    )
    if cond is not None:
        check_type(cond, "cond", Variable, "less_equal")

    helper = LayerHelper("less_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True

    attrs = dict()

    helper.append_op(
        type='less_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]},
        attrs=attrs,
    )
    return cond


@templatedoc()
def greater_than(x, y, cond=None, name=None):
    """
    :alias_main: paddle.greater_than
        :alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than
        :old_api: paddle.fluid.layers.greater_than

    This OP returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.

    Args:
        x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *greater_than*.
            if cond is None, a new Variable will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x` .

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
          limit = fluid.layers.assign(np.array([3, 2], dtype='int32'))
          out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True]
          out1 = label > limit #out1=[False, True]
    """
    check_variable_and_dtype(
        x, "x", ["float32", "float64", "int32", "int64"], "greater_than"
    )
    check_variable_and_dtype(
        y, "y", ["float32", "float64", "int32", "int64"], "greater_than"
    )
    if cond is not None:
        check_type(cond, "cond", Variable, "greater_than")

    helper = LayerHelper("greater_than", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True

    attrs = dict()

    if in_dygraph_mode():
        return _C_ops.greater_than(x, y)
    else:
        helper.append_op(
            type='greater_than',
            inputs={'X': [x], 'Y': [y]},
            outputs={'Out': [cond]},
            attrs=attrs,
        )
        return cond


@templatedoc()
def greater_equal(x, y, cond=None, name=None):
    """
    :alias_main: paddle.greater_equal
        :alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal
        :old_api: paddle.fluid.layers.greater_equal

    This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.

    Args:
        x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *greater_equal*.
            if cond is None, a new Variable will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np

          label = fluid.layers.assign(np.array([2, 2], dtype='int32'))
          limit = fluid.layers.assign(np.array([2, 3], dtype='int32'))
          out = fluid.layers.greater_equal(x=label, y=limit) #out=[True, False]
          out_1 = label >= limit #out_1=[True, False]

    """
    check_variable_and_dtype(
        x, "x", ["float32", "float64", "int32", "int64"], "greater_equal"
    )
    check_variable_and_dtype(
        y, "y", ["float32", "float64", "int32", "int64"], "greater_equal"
    )
    if cond is not None:
        check_type(cond, "cond", Variable, "greater_equal")

    helper = LayerHelper("greater_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True

    attrs = dict()

    helper.append_op(
        type='greater_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cond]},
        attrs=attrs,
    )
    return cond


def equal(x, y, cond=None, name=None):
    """
    This layer returns the truth value of :math:`x == y` elementwise.

    Args:
        x(Variable): Tensor, data type is float32, float64, int32, int64.
        y(Variable): Tensor, data type is float32, float64, int32, int64.
        cond(Variable, optional): Optional output which can be any created
            Variable that meets the requirements to store the result of *equal*.
            if cond is None, a new Variable will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: output Tensor, its shape is the same as the input Tensor's,
        and the data type is bool.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          out_cond = fluid.data(name="input1", shape=[2], dtype='bool')
          label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
          limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
          label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32"))
          out1 = fluid.layers.equal(x=label, y=limit) #out1=[True, False]
          out2 = fluid.layers.equal(x=label_cond, y=limit, cond=out_cond) #out2=[False, True] out_cond=[False, True]
    """
    if in_dygraph_mode():
        return _C_ops.equal(x, y)

    check_variable_and_dtype(
        x, "x", ["float32", "float64", "int32", "int64"], "equal"
    )
    check_variable_and_dtype(
        y, "y", ["float32", "float64", "int32", "int64"], "equal"
    )
    if cond is not None:
        check_type(cond, "cond", Variable, "equal")

    helper = LayerHelper("equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True

    helper.append_op(
        type='equal', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [cond]}
    )
    return cond


def not_equal(x, y, cond=None, name=None):
    """
    :alias_main: paddle.not_equal
        :alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal
        :old_api: paddle.fluid.layers.not_equal

    This OP returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`.

    Args:
        x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *not_equal*.
            if cond is None, a new Variable will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          label = fluid.layers.data(name='label', shape=[1], dtype='int64')
          limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64')
          out = fluid.layers.not_equal(x=label, y=limit)
    """
    check_variable_and_dtype(
        x, "x", ["float32", "float64", "int32", "int64"], "not_equal"
    )
    check_variable_and_dtype(
        y, "y", ["float32", "float64", "int32", "int64"], "not_equal"
    )
    if cond is not None:
        check_type(cond, "cond", Variable, "not_equal")

    helper = LayerHelper("not_equal", **locals())
    if cond is None:
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True

    helper.append_op(
        type='not_equal', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [cond]}
    )
    return cond


def array_read(array, i):
    """
    This OP is used to read data at the specified position from the input array
    :ref:`api_fluid_LoDTensorArray` . ``array`` is the input array and ``i``
    is the specified read position. This OP is often used together with
    :ref:`api_fluid_layers_array_write` OP.

    Case 1:
    ::
        Input:
            The shape of the first three tensors is [1], and that of the last one is [1,2]:
                array = ([0.6], [0.1], [0.3], [0.4, 0.2])
            And:
                i = [3]

        Output:
            output = [0.4, 0.2]

    Args:
        array (LoDTensorArray): The input LoDTensorArray.
        i (Variable): 1-D Tensor, whose shape is [1] and dtype is int64. It represents the
            specified read position of ``array``.

    Returns:
        Variable: The LoDTensor or Tensor that is read at the specified position of ``array``.

    Examples:
        .. code-block:: python

            # First we're going to create a LoDTensorArray, then we're going to write the Tensor into
            # the specified position, and finally we're going to read the Tensor at that position.
            import paddle.fluid as fluid
            arr = fluid.layers.create_array(dtype='float32')
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # tmp is the Tensor with shape [3,2], and if we write it into the position with subscript 10
            # of the empty-array: arr, then the length of arr becomes 11.
            arr = fluid.layers.array_write(tmp, i, array=arr)
            # Read the data of the position with subscript 10.
            item = fluid.layers.array_read(arr, i)

            # You can print out the data via executor.
            input = fluid.layers.Print(item, message="The LoDTensor of the i-th position:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)

            # The printed result is:

            # 1569588169  The LoDTensor of the i-th position: The place is:CPUPlace
            # Tensor[array_read_0.tmp_0]
            #    shape: [3,2,]
            #    dtype: l
            #    data: 5,5,5,5,5,5,

            # The output is a 2-D Tensor with shape [3,2].
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They all represent 64-bit integer variables.
    """
    if _non_static_mode():
        assert isinstance(
            array, list
        ), "The 'array' in array_read must be list in dygraph mode"
        assert isinstance(
            i, Variable
        ), "The index 'i' in array_read must be Variable in dygraph mode"
        assert i.shape == [
            1
        ], "The shape of index 'i' should be [1] in dygraph mode"
        i = i.numpy().item(0)
        return array[i]

    check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
    helper = LayerHelper('array_read', **locals())
    if (
        not isinstance(array, Variable)
        or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
    ):
        raise TypeError("array should be tensor array variable")
    out = helper.create_variable_for_type_inference(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array], 'I': [i]},
        outputs={'Out': [out]},
    )
    return out


def shrink_memory(x, i, table):
    """
    This function creates an operator to shrink RNN memory using the RankTable
    as mentioned in the input parameter.

    NOTE: This API is a very low-level API. It is used by DynamicRNN only.

    Since DynamicRNN implements RNN in a no-padding way, sequences
    will be sorted by length, and the length of valid memory will be shrunk after
    each time step.

    Args:
        x(Variable): The memory object in the previous time step.
        i(Variable): The step count variable. An int scalar as LoDTensor.
        table(Variable): The RNNRankTable object.

    Returns:
        The memory variable after shrinking.

    Examples:

        Since this API is a very low-level API, no example is provided here.
        Please refer to the implementation of class DynamicRNN for detailed
        usage.
    """
    helper = LayerHelper('shrink_memory', **locals())
    check_type(x, 'x', Variable, 'shrink_memory')
    check_type(i, 'i', Variable, 'shrink_memory')
    check_type(table, 'table', Variable, 'shrink_memory')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='shrink_rnn_memory',
        inputs={'X': [x], 'I': [i], 'RankTable': [table]},
        outputs={'Out': [out]},
        attrs={},
    )
    return out


def array_length(array):
    """
    This OP is used to get the length of the input array :ref:`api_fluid_LoDTensorArray` .
    It can be used together with :ref:`api_fluid_layers_array_read` , :ref:`api_fluid_layers_array_write` ,
    :ref:`api_fluid_layers_While` OP to traverse, read and write LoDTensorArray.

    Args:
        array (LoDTensorArray): The input array that will be used to compute the length.

    Returns:
        Variable: 1-D Tensor with shape [1], which is the length of array. Datatype: int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            tmp = fluid.layers.zeros(shape=[10], dtype='int32')
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # tmp is 1-D Tensor with shape [10]. We write tmp into arr on subscript 10,
            # then the length of arr becomes 11.
            arr = fluid.layers.array_write(tmp, i=i)
            # return the length of arr
            arr_len = fluid.layers.array_length(arr)

            # You can use executor to print out the length of LoDTensorArray.
            input = fluid.layers.Print(arr_len, message="The length of LoDTensorArray:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)

            # The printed result is:

            # 1569576542  The length of LoDTensorArray:   The place is:CPUPlace
            # Tensor[array_length_0.tmp_0]
            #    shape: [1,]
            #    dtype: l
            #    data: 11,

            # 1-D Tensor with shape [1], whose value is 11. It means that the length of LoDTensorArray
            # is 11.
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They all represent 64-bit integer variables.
    """

    if _non_static_mode():
        assert isinstance(
            array, list
        ), "The 'array' in array_length must be a list in dygraph mode"
        return len(array)

    if (
        not isinstance(array, Variable)
        or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
    ):
        raise TypeError(
            "array should be tensor array variable in array_length Op"
        )

    helper = LayerHelper('array_length', **locals())
    tmp = helper.create_variable_for_type_inference(dtype='int64')
    tmp.stop_gradient = True
    helper.append_op(
        type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}
    )
    return tmp


class ConditionalBlockGuard(BlockGuard):
    """
    ConditionalBlockGuard is derived from BlockGuard. It is dedicated to
    holding a ConditionalBlock and helping users enter and exit the
    ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
    is generally an internal component of IfElse; users should not use it directly.
    """

    def __init__(self, block):
        check_type(block, "block", ConditionalBlock, "ConditionalBlockGuard")
        super().__init__(block.helper.main_program)
        self.block = block

    def __enter__(self):
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.block.complete()
        return super().__exit__(exc_type, exc_val, exc_tb)


class ConditionalBlock:
    '''
    **ConditionalBlock**

    ConditionalBlock is an operator that binds a block to a specific condition;
    if the condition matches, the corresponding block will be executed.

    Args:
        inputs (Variable): bool conditions.
        is_scalar_condition (bool): whether the branch is controlled by a scalar.
        name(str): name of this ConditionalBlock.

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             cond = fluid.layers.less_than(x=label, y=limit)
             true_image, false_image = fluid.layers.split_lod_tensor(
                 input=image, mask=cond)
             true_cond = fluid.layers.ConditionalBlock([true_image])
             false_cond = fluid.layers.ConditionalBlock([false_image])

             with true_cond.block():
                 ...
             with false_cond.block():
                 ...
    '''

    def __init__(self, inputs, is_scalar_condition=False, name=None):
        for each_input in inputs:
            check_type(each_input, "input", Variable, "ConditionalBlock")
        self.inputs = inputs
        self.is_scalar_condition = is_scalar_condition
        self.helper = LayerHelper('conditional_block', name=name)

    def block(self):
        return ConditionalBlockGuard(self)

    def complete(self):
        inside_block = self.helper.main_program.current_block()
        parent_block = self.helper.main_program.block(inside_block.parent_idx)

        intermediate = set()
        params = set()
        params, intermediate = get_inputs_outputs_in_block(
            inside_block, params, intermediate, helper=self.helper
        )

        # TODO(liym27): Here we assume that all params are in the recursive
        # parent block, but when minimize() is called in control flow, some
        # params may be in the conditional grad block.
        param_list = [
            parent_block._var_recursive(each_name) for each_name in params
        ]

        out_list = []
        for inner_out_name in intermediate:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_list.append(inner_var)

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES
        )
        conditional_block_op = parent_block.append_op(
            type='conditional_block',
            inputs={
                'Cond': self.inputs,
                'Input': param_list,
            },
            outputs={'Out': out_list, 'Scope': [step_scope]},
            attrs={
                'sub_block': inside_block,
                'is_scalar_condition': self.is_scalar_condition,
            },
        )

        if self.need_append_conditional_block_grad(inside_block):
            self.append_conditional_block_grad(
                parent_block, inside_block, conditional_block_op
            )

    def need_append_conditional_block_grad(self, inside_block):
        grad_sub_block_idx = inside_block.backward_block_idx
        inside_block_idx = inside_block.idx

        # If inside_block has a grad_block and the grad_block is not itself,
        # we will append conditional block grad.
        return (
            grad_sub_block_idx != -1 and grad_sub_block_idx != inside_block_idx
        )

    def append_conditional_block_grad(
        self, parent_block, inside_block, conditional_block_op
    ):
        '''
        Append op `conditional_block_grad` manually.
        When `optimizer.minimize/append_backward` is called in Paddle control flow,
        grad ops will be appended before appending op `conditional_block` so that
        op `conditional_block_grad` can't be appended when calling
        `optimizer.minimize/append_backward`. After appending op `conditional_block`,
        `conditional_block_grad` is appended manually.

        Args:
            parent_block (Block): The block that `conditional_block_op` belongs to.
            inside_block (Block): The sub block of `conditional_block_op`.
            conditional_block_op (Operator): The forward op conditional_block.
        '''

        grad_sub_block_idx = inside_block.backward_block_idx
        grad_sub_block = self.helper.main_program.block(grad_sub_block_idx)

        intermediate = set()
        params = set()

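        # Scan the grad block's ops: vars consumed before being produced are
        # external inputs (params); vars produced by the ops are intermediate.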
        for each_op in grad_sub_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)

            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)

        param_list = []
        for inner_input_name in params:
            inner_var = parent_block._find_var_recursive(inner_input_name)
            if inner_var:
                param_list.append(inner_var.name)

        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            conditional_block_op.desc, set(), [grad_sub_block.desc]
        )

        # append op_desc in grad_op_descs to target_block
        op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        new_op_desc = parent_block.desc.append_op()
        new_op_desc.copy_from(grad_op_desc[0])
        new_op_desc._set_attr(op_role_attr_name, backward)
        # set input and output manually
        new_op_desc.set_input('Input', param_list)
        new_op_desc.set_output(
            'Input@GRAD', [param + "@GRAD" for param in param_list]
        )

        new_vars = set()
        for grad_var_name in new_op_desc.output_arg_names():
            if (
                grad_sub_block.desc.has_var_recursive(grad_var_name.encode())
                or grad_var_name == core.empty_var_name()
            ):
                continue
            grad_sub_block.desc.var(grad_var_name.encode())
            new_vars.add(grad_var_name)
            if grad_var_name not in op_grad_to_var:
                continue

        # infer_shape and infer_type
        new_op_desc.infer_var_type(grad_sub_block.desc)
        new_op_desc.infer_shape(grad_sub_block.desc)

        for arg in new_op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_shape_(arg, grad_sub_block)

        self.helper.main_program._sync_with_cpp()


def copy_var_to_parent_block(var, layer_helper):
    if not isinstance(var, Variable):
        return var
    prog = layer_helper.main_program
    parent_idx = prog.current_block().parent_idx
    assert (
        parent_idx >= 0
    ), "Got wrong parent block index when assigning var to parent scope in control_flow"
    parent_block = prog.block(parent_idx)

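    # A LoDTensorArray that is already visible from the parent scope can be
    # reused directly; otherwise create a fresh var in the parent block and
    # copy the value into it with assign.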
    if (
        var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        and parent_block._find_var_recursive(var.name)
    ):
        parent_block_var = var
    else:
        parent_block_var = parent_block.create_var(
            dtype=var.dtype, shape=var.shape, type=var.type
        )
        assign(var, parent_block_var)
    return parent_block_var


def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
    """
    This API returns ``true_fn()`` if the predicate ``pred`` is true, else
    ``false_fn()`` . Users can also set ``true_fn`` or ``false_fn`` to
    ``None`` to do nothing; in that case this API behaves as if the callable
    simply returned ``None`` .

    ``true_fn`` and ``false_fn`` should return the same nest structure of
    tensors, or both return ``None`` if the user doesn't want to return
    anything. A nest structure of tensors in PaddlePaddle is tensor(s), or a
    tuple of tensors, or a list of tensors.

    Note:
        1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must
        have the same nest structure because of the dataflow model of
        PaddlePaddle, while the tensors in the tuples or the lists can have
        different shapes.

        2. This API can be used under both static mode and dygraph mode. In
        dygraph mode, the API only runs one branch based on the condition.

        3. If it is in static mode, any tensors or operations created outside
        or inside of ``true_fn`` and ``false_fn`` will be built into the
        network regardless of which branch is selected at runtime. This has
        frequently surprised users who expected lazy semantics. For example:

        .. code-block:: python

            import paddle

            a = paddle.zeros((1, 1))
            b = paddle.zeros((1, 1))
            c = a * b
            out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)

        No matter whether ``a < b`` holds, ``c = a * b`` will be built into
        the network and run. ``a + c`` and ``b * b`` will both be built into
        the network, but only one branch will be executed at runtime.
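
        As a contrast, here is a minimal sketch of the dygraph behavior
        described in Note 2, where only the selected branch actually runs:

        .. code-block:: python

            import paddle

            a = paddle.to_tensor(0.1)
            b = paddle.to_tensor(0.23)
            # Only the true branch is executed, because a < b holds here.
            out = paddle.static.nn.cond(a < b, lambda: a + b, lambda: a - b)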

    Args:
        pred(Tensor): A boolean tensor whose numel should be 1. The boolean
            value determines whether to return the result of ``true_fn`` or
            ``false_fn`` .
        true_fn(callable, optional): A callable to be performed if ``pred`` is
            true. The default value is ``None`` .
        false_fn(callable, optional): A callable to be performed if ``pred`` is
            false. The default value is ``None`` .
        name(str, optional): The default value is ``None`` . Normally users
             don't have to set this parameter. For more information, please
             refer to :ref:`api_guide_Name` .
        return_names(sequence of string, optional): The default value is ``None`` .
             Normally users don't have to set this parameter. A sequence of
             strings representing the names of the returned vars. The structure
             of the sequence must match the return values of ``true_fn`` and
             ``false_fn`` .

    Returns:
        Tensor|list(Tensor)|tuple(Tensor): returns ``true_fn()`` if the
        predicate ``pred`` is true, else ``false_fn()`` .

    Raises:
        TypeError: if ``true_fn`` or ``false_fn`` is not callable.
        ValueError: if ``true_fn`` and ``false_fn`` don't return the same nest
            structure of tensors.

    Examples:
        .. code-block:: python

            import paddle

            #
            # pseudocode:
            # if 0.1 < 0.23:
            #     return 1, True
            # else:
            #     return 3, 2
            #

            def true_func():
                return paddle.full(shape=[1, 2], dtype='int32',
                                   fill_value=1), paddle.full(shape=[2, 3],
                                                              dtype='bool',
                                                              fill_value=True)

            def false_func():
                return paddle.full(shape=[3, 4], dtype='float32',
                                   fill_value=3), paddle.full(shape=[4, 5],
                                                              dtype='int64',
                                                              fill_value=2)

            x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
            y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
            pred = paddle.less_than(x=x, y=y, name=None)
            ret = paddle.static.nn.cond(pred, true_func, false_func)
            # ret is a tuple containing 2 tensors
            # ret[0] = [[1 1]]
            # ret[1] = [[ True  True  True]
            #           [ True  True  True]]

    """
    if _non_static_mode():
        assert isinstance(pred, Variable), "The pred in cond must be Variable"
        assert pred.size == 1, "condition input's numel should be 1"
        pred = pred.numpy()[0]
        if pred:
            if true_fn is not None:
                if not callable(true_fn):
                    raise TypeError(
                        "The true_fn in cond must be callable, but received {}".format(
                            type(true_fn).__name__
                        )
                    )
                return true_fn()
        else:
            if false_fn is not None:
                if not callable(false_fn):
                    raise TypeError(
                        "The false_fn in cond must be callable, but received {}".format(
                            type(false_fn).__name__
                        )
                    )
                return false_fn()
        return None

    check_variable_and_dtype(pred, "pred", ['bool'], "fluid.layers.cond")
    check_type(name, "name", (str, type(None)), "fluid.layers.cond")
    helper = LayerHelper('cond', **locals())
    true_output = None
    false_output = None
    copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
    if true_fn is not None:
        if not callable(true_fn):
            raise TypeError(
                "The true_fn in cond must be callable, but received {}".format(
                    type(true_fn).__name__
                )
            )
        true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
        with true_cond_block.block():
            origin_true_output = true_fn()
            if origin_true_output is not None:
                true_output = map_structure(
                    copy_to_parent_func, origin_true_output
                )
    if false_fn is not None:
        if not callable(false_fn):
            raise TypeError(
                "The false_fn in cond must be callable, but received {}".format(
                    type(false_fn).__name__
                )
            )
        false_cond_block = ConditionalBlock(
            [paddle.logical_not(pred)], is_scalar_condition=True
        )
        with false_cond_block.block():
            origin_false_output = false_fn()
            if origin_false_output is not None:
                false_output = map_structure(
                    copy_to_parent_func, origin_false_output
                )

    if true_output is None and false_output is None:
        return None

    if true_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns None while false_fn returns non-None"
        )
    if false_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns non-None while false_fn returns None"
        )

    # Merge true and false output if they are not None
    if return_names is None:
        is_dy2static = False
        return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
    else:
        """
        dy2static will set the return_names and expand the return values to UndefinedVar.
        """
        is_dy2static = True

        # TODO: expand_undefined_var will replace None with UndefinedVar(), to fix cases like:
        #       a = None
        #       if condition:
        #           a = 1
        # Because we can not use a variable to express 'None'
        true_output, false_output = expand_undefined_var(
            true_output, false_output, return_names
        )

    if len(_to_sequence_except_dict(true_output)) != len(
        _to_sequence_except_dict(false_output)
    ):
        raise ValueError(
            "true_fn returns {} vars, but false_fn returns {} vars, which is not equal".format(
                len(_to_sequence_except_dict(true_output)),
                len(_to_sequence_except_dict(false_output)),
            )
        )
    for true_out, false_out, return_name in zip(
        _to_sequence_except_dict(true_output),
        _to_sequence_except_dict(false_output),
        _to_sequence_except_dict(return_names),
    ):
        try:
            assert_same_structure(true_out, false_out, check_types=False)
        except ValueError as e:
            raise ValueError(
                "Incompatible return values of `{}` in true_fn and false_fn in cond: {}".format(
                    return_name, e
                )
            )

    def check_ret_none(seq_true, seq_false, seq_names):
        for f_true, f_false, f_name in zip(seq_true, seq_false, seq_names):
            f_true = flatten(f_true)
            f_false = flatten(f_false)
            for idx in range(len(f_true)):
                if (
                    f_true[idx] is None
                    and f_false[idx] is not None
                    or f_false[idx] is None
                    and f_true[idx] is not None
                ):
                    warnings.warn(
                        "In cond : Var '{}' or part of it is set differently in ifelse branches, "
                        "<{}, {}> in true branch and <{}, {}> in false branch. Setting var to "
                        "'None' in an ifelse block might lead to an error.".format(
                            f_name,
                            type(f_true[idx]),
                            f_true[idx],
                            type(f_false[idx]),
                            f_false[idx],
                        )
                    )

    check_ret_none(
        _to_sequence_except_dict(true_output),
        _to_sequence_except_dict(false_output),
        _to_sequence_except_dict(return_names),
    )

    if is_dy2static:
        true_output, false_output = change_none_to_undefinedvar(
            true_output, false_output
        )

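    # Both branches wrote their results into parent-block vars; pick between
    # each (false_var, true_var) pair with a 0/1 mask computed from pred.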
    mask = cast(pred, dtype='int32')
    merge_func = (
        lambda name, false_var, true_var: select_input_with_buildin_type(
            [false_var, true_var], mask, name
        )
    )

    def merge_every_var_list(false_vars, true_vars, name):
        return map_structure(partial(merge_func, name), false_vars, true_vars)

    merged_output = list(
        map(
            merge_every_var_list,
            _to_sequence_except_dict(false_output),
            _to_sequence_except_dict(true_output),
            _to_sequence_except_dict(return_names),
        )
    )
    merged_output = pack_sequence_as(false_output, flatten(merged_output))
    return merged_output


def change_none_to_undefinedvar(nest1, nest2):
    from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar

    def map_fn(x):
        if x is None:
            return UndefinedVar("padding")
        return x

    nest1_out = pack_sequence_as(nest1, list(map(map_fn, flatten(nest1))))
    nest2_out = pack_sequence_as(nest2, list(map(map_fn, flatten(nest2))))
    return nest1_out, nest2_out


def _to_sequence_except_dict(x):
    """
    In this function, dict is not viewed as sequence.
    """
    if isinstance(x, dict):
        return [x]
    return to_sequence(x)


def _is_sequence_except_dict(x):
    """
    In this function, dict is not viewed as sequence.
    """
    if isinstance(x, dict):
        return False
    return is_sequence(x)


def expand_undefined_var(nest1, nest2, names):
    """TODO: make this function recursive.
    nest1: Var1, (UndefinedVar, [1,2,3])
    nest2: Var2, ([1,2,3,4], UndefinedVar)
    In this case, we should not expand recursively.
    """
    from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar
    from paddle.fluid.dygraph.dygraph_to_static.return_transformer import (
        RETURN_VALUE_PREFIX,
    )

    def pack_undefined_var_as(seq):
        return pack_sequence_as(
            seq, [UndefinedVar("padding") for i in flatten(seq)]
        )

    def map_fn(n1, n2, name, order):
        if not name.startswith(RETURN_VALUE_PREFIX) and (
            isinstance(n1, UndefinedVar) or n1 is None
        ):
            if n1 is None and n2 is not None:
                if order == 0:
                    warnings.warn(
                        "In cond : Var '{}' or part of it is set differently in ifelse branches, "
                        "<{}, {}> in true branch and <{}, {}> in false branch. Setting var to "
                        "'None' in an ifelse block might lead to an error.".format(
                            name, type(n1), n1, type(n2), n2
                        )
                    )
                else:
                    warnings.warn(
                        "In cond : Var '{}' or part of it is set differently in ifelse branches, "
                        "<{}, {}> in true branch and <{}, {}> in false branch. Setting var to "
                        "'None' in an ifelse block might lead to an error.".format(
                            name, type(n2), n2, type(n1), n1
                        )
                    )
            return pack_undefined_var_as(n2)
        return n1

    nest1_out = list(
        map(
            map_fn,
            _to_sequence_except_dict(nest1),
            _to_sequence_except_dict(nest2),
            _to_sequence_except_dict(names),
            [0 for i in _to_sequence_except_dict(names)],
        )
    )
    nest2_out = list(
        map(
            map_fn,
            _to_sequence_except_dict(nest2),
            _to_sequence_except_dict(nest1),
            _to_sequence_except_dict(names),
            [1 for i in _to_sequence_except_dict(names)],
        )
    )
    if not _is_sequence_except_dict(nest1):
        nest1_out = nest1_out[0]
    if not _is_sequence_except_dict(nest2):
        nest2_out = nest2_out[0]
    return nest1_out, nest2_out


def _error_message(what, arg_name, op_name, right_value, error_value):
    error_message = (
        "{what} of '{arg_name}' in {op_name} must be "
        "{right_value}, but received: {error_value}.".format(
            what=what,
            arg_name=arg_name,
            op_name=op_name,
            right_value=right_value,
            error_value=error_value,
        )
    )

    return error_message


def case(pred_fn_pairs, default=None, name=None):
    '''
    :api_attr: Static Graph

    This operator works like an if-elif-...-else chain.

    Args:
        pred_fn_pairs(list|tuple): A list or tuple of (pred, fn) pairs. ``pred`` is a boolean Tensor with shape [1], ``fn`` is a callable. All callables return the same structure of Tensors.
        default(callable, optional): Callable that returns a structure of Tensors.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor|list(Tensor): Tensors returned by the callable from the first pair whose pred is True,
        or Tensors returned by ``default`` if no pred in ``pred_fn_pairs`` is True and ``default`` is not None,
        or Tensors returned by the last callable in ``pred_fn_pairs`` if no pred in ``pred_fn_pairs`` is True and ``default`` is None.

    Raises:
        TypeError: If the type of ``pred_fn_pairs`` is not list or tuple.
        TypeError: If the type of elements in ``pred_fn_pairs`` is not tuple.
        TypeError: If the size of tuples in ``pred_fn_pairs`` is not 2.
        TypeError: If the first element of a 2-tuple in ``pred_fn_pairs`` is not a Tensor.
        TypeError: If the second element of a 2-tuple in ``pred_fn_pairs`` is not callable.
        TypeError: If ``default`` is not None but it is not callable.

    Examples:
        .. code-block:: python

            import paddle

            paddle.enable_static()

            def fn_1():
                return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)

            def fn_2():
                return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)

            def fn_3():
                return paddle.full(shape=[3], dtype='int32', fill_value=3)

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()

            with paddle.static.program_guard(main_program, startup_program):
                x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
                y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
                z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)

                pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
                pred_3 = paddle.equal(x, y)      # false: 0.3 == 0.1

                # Call fn_1 because pred_1 is True
                out_1 = paddle.static.nn.case(
                    pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)

                # Argument default is None and no pred in pred_fn_pairs is True. fn_3 will be called
                # because fn_3 is the last callable in pred_fn_pairs.
                out_2 = paddle.static.nn.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])

                exe = paddle.static.Executor(paddle.CPUPlace())
                res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
                print(res_1)  # [[1. 1.]]
                print(res_2)  # [3 3 3]
    '''
    helper = LayerHelper('case', **locals())

    def _case_check_args(pred_fn_pairs, default):
        '''
        Check arguments pred_fn_pairs and default. Return canonical pred_fn_pairs and default.
        '''
        check_type(pred_fn_pairs, 'pred_fn_pairs', (list, tuple), 'case')

        for pred_fn in pred_fn_pairs:
            if not isinstance(pred_fn, tuple):
                raise TypeError(
                    _error_message(
                        "The elements' type",
                        "pred_fn_pairs",
                        "case",
                        tuple,
                        type(pred_fn),
                    )
                )
            if len(pred_fn) != 2:
                raise TypeError(
                    _error_message(
                        "The tuple's size",
                        "pred_fn_pairs",
                        "case",
                        "2",
                        str(len(pred_fn)) + "-tuple",
                    )
                )
            pred, fn = pred_fn

            if not isinstance(pred, Variable):
                raise TypeError(
                    _error_message(
                        "The pred's type",
                        "pred_fn_pairs",
                        "case",
                        "boolean Variable",
                        type(pred),
                    )
                )

            if not callable(fn):
                raise TypeError(
                    "The fn for {} of pred_fn_pairs in Op(case) must"
                    " be callable.".format(pred.name)
                )

        if default is None:
            default_index = len(pred_fn_pairs) - 1  # pick the last one
            default = pred_fn_pairs[default_index][1]
            pred_fn_pairs = pred_fn_pairs[:default_index]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")

        return pred_fn_pairs, default

    pred_fn_pairs, default = _case_check_args(pred_fn_pairs, default)

    false_fn = default
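    # Fold the (pred, fn) pairs from the tail: each pair wraps the callable
    # built so far in a new ``cond``, so at runtime the first pred that is
    # True decides which fn runs.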
    for pred, true_fn in reversed(pred_fn_pairs):
        false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)

    final_fn = false_fn

    return final_fn()


class Switch:
    """
    :api_attr: Static Graph

    This class is used to implement the Switch branch control function.
    A Switch branch contains several case branches and one default branch.
    Switch control flow checks whether the case branch conditions are satisfied in turn,
    and only executes the statements after the first case branch that satisfies the conditions.
    If there is no case branch that satisfies the condition,
    only the statements following the default branch are executed.

    Note:
        A new OP :ref:`api_fluid_layers_case` is highly recommended instead of ``Switch`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_case` is easier to use and is called with less code but does the same thing as ``Switch`` .

    Member Functions:
        case(condition): The case branch of Switch, whose parameter ``condition`` is a scalar Variable of bool type. The statements after a case branch are executed only if the condition of that case branch is True and the conditions of all previous case branches are False.

        default(): The default branch of Switch. When the conditions of all case branches are False, the statements after the default branch are executed.

    Case and default functions can only be used inside the scope of Switch, as shown below:

    .. code-block:: python

        '''
        with fluid.layers.Switch() as switch:
            with switch.case(cond1):
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
            with switch.case(cond2):
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
            with switch.default():
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
        '''

    Args:
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            lr = fluid.layers.create_global_var(
                shape=[1],
                value=0.0,
                dtype='float32',
                persistable=True,
                name="learning_rate")
            zero_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.0)
            one_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=1.0)
            two_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=2.0)

            global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)

            with fluid.layers.control_flow.Switch() as switch:
                with switch.case(global_step == zero_var):
                    fluid.layers.assign(input=one_var, output=lr)
                with switch.default():
                    fluid.layers.assign(input=two_var, output=lr)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[lr])
            print(res) # [array([1.], dtype=float32)]
    """

    def __init__(self, name=None):
        self.helper = LayerHelper('switch', name=name)
        self.inside_scope = False
        self.pre_not_conditions = []

    def case(self, condition):
        if not self.inside_scope:
            raise ValueError("case should be called inside with")

        check_variable_and_dtype(
            condition,
            'condition',
            ['bool'],
            'the member function case of fluid.layers.Switch',
        )

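        # Each case runs under "no earlier case matched AND this condition
        # holds"; pre_not_conditions accumulates the conjunction of negated
        # earlier conditions so later cases and default() can reuse it.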
        if len(self.pre_not_conditions) == 0:
            cond_block = ConditionalBlock([condition], is_scalar_condition=True)
            not_cond = paddle.logical_not(x=condition)
            self.pre_not_conditions.append(not_cond)
        else:
            pre_cond_num = len(self.pre_not_conditions)
            pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
            new_not_cond = paddle.logical_and(
                x=pre_not_cond, y=paddle.logical_not(x=condition)
            )
            self.pre_not_conditions.append(new_not_cond)
            cond_block = ConditionalBlock(
                [paddle.logical_and(x=pre_not_cond, y=condition)],
                is_scalar_condition=True,
            )

        return ConditionalBlockGuard(cond_block)

    def default(self):
        pre_cond_num = len(self.pre_not_conditions)
        if pre_cond_num == 0:
            raise ValueError("there should be at least one condition")
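        # The default branch runs exactly when every case condition was False,
        # i.e. under the conjunction of all negated case conditions.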
        cond_block = ConditionalBlock(
            [self.pre_not_conditions[pre_cond_num - 1]],
            is_scalar_condition=True,
        )
        return ConditionalBlockGuard(cond_block)

    def __enter__(self):
        """
        Set the flag indicating that we are now inside the switch block.
        :return:
        """
        self.inside_scope = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.inside_scope = False
        if exc_type is not None:
            return False  # re-raise exception

        return True


class IfElseBlockGuard:
    def __init__(self, is_true, ifelse):
        if not isinstance(ifelse, IfElse):
            raise TypeError("ifelse must be an instance of IfElse class")

        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("You cannot invoke IfElse.block() inside a block")

        self.is_true = is_true
        self.ie = ifelse
        if is_true:
            self.cond_block = ifelse.conditional_true_block
        else:
            self.cond_block = ifelse.conditional_false_block

        if not isinstance(self.cond_block, ConditionalBlock):
            raise TypeError("Unexpected situation")

        self.cond_block = self.cond_block.block()

    def __enter__(self):
        self.ie.status = (
            IfElse.IN_IF_ELSE_TRUE_BLOCKS
            if self.is_true
            else IfElse.IN_IF_ELSE_FALSE_BLOCKS
        )
        self.cond_block.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
            # re-raise inside exception
            return False
        if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
            raise ValueError("Must set output inside block")
        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS


class IfElse:
    """
    :api_attr: Static Graph

    This class is used to implement the IfElse branch control function. IfElse contains two blocks, true_block and false_block. IfElse will put data satisfying True or False conditions into different blocks to run.

    Cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the execution conditions of the corresponding part of the input data.

    Note:
        A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` .

    IfElse OP differs from other OPs in usage, which may confuse some users. Here is a simple example to illustrate this OP.

    .. code-block:: python

        # The following code completes the function: subtract 10 from the data greater than 0 in x, add 10 to the data less than 0 in x, and sum all the data.
        import numpy as np
        import paddle
        import paddle.fluid as fluid

        x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32', append_batch_size=False)
        y = fluid.layers.data(name='y', shape=[4, 1], dtype='float32', append_batch_size=False)

        x_d = np.array([[3], [1], [-2], [-3]]).astype(np.float32)
        y_d = np.zeros((4, 1)).astype(np.float32)

        # Compare the size of x, y pairs of elements, output cond, cond is shape [4, 1], data type bool 2-D tensor.
        # Based on the input data x_d, y_d, it can be inferred that the data in cond are [[true], [true], [false], [false]].
        cond = fluid.layers.greater_than(x, y)
        # Unlike other common OPs, ie below returned by the OP is an IfElse OP object
        ie = fluid.layers.IfElse(cond)

        with ie.true_block():
            # In this block, according to cond condition, the data corresponding to true dimension in X is obtained and subtracted by 10.
            out_1 = ie.input(x)
            out_1 = out_1 - 10
            ie.output(out_1)
        with ie.false_block():
            # In this block, according to cond condition, get the data of the corresponding condition in X as false dimension, and add 10
            out_1 = ie.input(x)
            out_1 = out_1 + 10
            ie.output(out_1)

        # According to cond condition, the data processed in the two blocks are merged. The output here is output, the type is List, and the element type in List is Variable.
        output = ie() #  [array([[-7.], [-9.], [ 8.], [ 7.]], dtype=float32)]

        # Get the first Variable in the output List and add all elements.
        out = paddle.sum(output[0])

        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())

        res = exe.run(fluid.default_main_program(), feed={"x":x_d, "y":y_d}, fetch_list=[out])
        print(res)
        # [array([-1.], dtype=float32)]

    Args:
        cond (Variable): cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the corresponding execution conditions of N input data. The data type is bool.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Unlike other common OPs, calling the OP returns an IfElse OP object (e.g. ``ie`` in the example), which branches the input data by calling the object's internal functions ``true_block()``, ``false_block()``, ``input()`` and ``output()``, and integrates the data processed by the different branches as the overall output by calling the object itself via ``output = ie()``. The output type is a list, and the type of each element in the list is Variable.

    Internal Functions:
        The block is constructed by calling the ``with ie.true_block()`` function in the object, and the computational logic under condition true is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.

        The block is constructed by calling the ``with ie.false_block()`` function in the object, and the computational logic under condition false is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.

        ``out = ie.input(x)`` takes out the data of the corresponding conditional dimension in x and puts it into out, supporting the internal processing of multiple inputs in a block.

        ``ie.output(out)`` writes the result to the output of the corresponding condition.

        The object has a ``__call__()`` function, that is, by calling ``output = ie()``, the outputs written inside the true and false blocks are fused as the whole output; the output type is a list, and the type of each element in the list is Variable.

    """

    OUT_IF_ELSE_BLOCKS = 0
    IN_IF_ELSE_TRUE_BLOCKS = 1
    IN_IF_ELSE_FALSE_BLOCKS = 2

    def __init__(self, cond, name=None):
        check_type(cond, "cond", Variable, "fluid.layers.IfElse")
        check_type(name, "name", (str, type(None)), "fluid.layers.IfElse")
        self.helper = LayerHelper('ifelse', name=name)
        self.cond = cond
        self.input_table = {}
        self.status = IfElse.OUT_IF_ELSE_BLOCKS
        self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
        self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
        self.output_table = ([], [])  # (true_outs, false_outs)

    def input(self, x):
        if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("input must in true/false blocks")
        if id(x) not in self.input_table:
            parent_block = self._parent_block()
            out_true = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key(
                    'ifelse_input' + self.helper.name
                ),
                dtype=x.dtype,
            )

            out_false = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key(
                    'ifelse_input' + self.helper.name
                ),
                dtype=x.dtype,
            )
            parent_block.append_op(
                type='split_lod_tensor',
                inputs={
                    'X': x,
                    'Mask': self.cond,
                },
                outputs={'OutTrue': out_true, 'OutFalse': out_false},
                attrs={'level': 0},
            )
            self.input_table[id(x)] = (out_true, out_false)
        else:
            out_true, out_false = self.input_table[id(x)]

        if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
            return out_true
        else:
            return out_false

    def _parent_block(self):
        current_block = self.helper.main_program.current_block()
        return self.helper.main_program.block(current_block.parent_idx)

    def true_block(self):
        return IfElseBlockGuard(True, self)

    def false_block(self):
        return IfElseBlockGuard(False, self)

    def output(self, *outs):
        if self.status == self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("output can only be invoked in the sub-block")

3495 3496 3497
        out_table = self.output_table[
            1 if self.status == self.IN_IF_ELSE_TRUE_BLOCKS else 0
        ]
        parent_block = self._parent_block()
        for each_out in outs:
            check_type(
                each_out, "each output", Variable, "fluid.layers.IfElse.output"
            )
            # create outside tensor
            outside_out = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key(
                    "_".join([self.helper.name, 'output'])
                ),
                dtype=each_out.dtype,
            )
            out_table.append(outside_out)

            # assign local var to outside
            assign(input=each_out, output=outside_out)

    def __call__(self):
        if self.status != self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("IfElse::__call__ must be out of sub-block")
        false_len, true_len = list(map(len, self.output_table))
        if false_len == 0 and true_len == 0:
            raise ValueError(
                "Must invoke true_block/false_block before __call__"
            )
        elif false_len != true_len and false_len != 0 and true_len != 0:
            raise ValueError(
                "The numbers of outputs in true_block and false_block must be the same"
            )
        elif false_len == 0 or true_len == 0:
            return self.output_table[0 if false_len != 0 else 1]

        # else none of false_len/true_len is zero
        # merge together
        rlist = []
        for false_var, true_var in zip(*self.output_table):
            rlist.append(
                merge_lod_tensor(
                    in_true=true_var,
                    in_false=false_var,
                    mask=self.cond,
                    x=self.cond,
                    level=0,
                )
            )
        return rlist


class DynamicRNN:
    """
    :api_attr: Static Graph

    **Note: the input of this class should be LoDTensor which holds the
    information of variable-length sequences. If the input is fixed-length Tensor,
    please use StaticRNN (fluid.layers.** :ref:`api_fluid_layers_StaticRNN` **) for
    better performance.**

    DynamicRNN can process a minibatch of variable-length sequences.
    The length of each sample can be different and is recorded in LoD.
    In DynamicRNN, an input sequence will be unfolded into time steps and users
    can define how to process each time step in :code:`block()` .
    The total number of time steps is determined by the longest sequence.
    DynamicRNN will not pad all sequences to the same length, instead it will
    sort the sequences internally by the sequence length in descending order.
    The input sequences will be shrunk because only the sequences whose
    length is larger than the current time step will participate in the remaining calculation.

    If defined :code:`drnn = DynamicRNN()`, then users can call :code:`drnn()`
    to obtain the result sequences. It is a LoDTensor gained by merging all
    time steps' outputs. When RNN's input sequence x meets :code:`x.lod_level == 1`,
    the output LoDTensor will have the same LoD as x. The result of :code:`drnn()`
    includes RNN's outputs of all time steps; users can call
    :ref:`api_fluid_layers_sequence_last_step` to extract the data of the last time step.

    Warning:
        Currently it is not supported to set :code:`is_sparse = True` of any
        layers defined within DynamicRNN's :code:`block` function.

    Args:
        name (str, optional): The default value is None.  Normally there is no
            need for user to set this property.  For more information,
            please refer to :ref:`api_guide_Name` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
            encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1)
            decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32')

            drnn = fluid.layers.DynamicRNN()
            with drnn.block():
                # Set sentence as RNN's input, each time step processes a word from the sentence
                current_word = drnn.step_input(sentence)
                # Set encode_proj as RNN's static input
                encoder_word = drnn.static_input(encoder_proj)
                # Initialize memory with boot_memory, which need reorder according to RNN's input sequences
                memory = drnn.memory(init=decoder_boot, need_reorder=True)
                fc_1 = fluid.layers.fc(input=encoder_word, size=30)
                fc_2 = fluid.layers.fc(input=current_word, size=30)
                decoder_inputs = fc_1 + fc_2
                hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30)
                # Update memory with hidden
                drnn.update_memory(ex_mem=memory, new_mem=hidden)
                out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax')
                # Set hidden and out as RNN's outputs
                drnn.output(hidden, out)

            # Get RNN's result
            hidden, out = drnn()
            # Get RNN's result of the last time step
            last = fluid.layers.sequence_last_step(out)
    """

    BEFORE_RNN = 0
    IN_RNN = 1
    AFTER_RNN = 2

    def __init__(self, name=None):
        self.helper = LayerHelper('dynamic_rnn', name=name)
        self.status = DynamicRNN.BEFORE_RNN
        self.lod_rank_table = None
        self.max_seq_len = None
        self.step_idx = None
        self.zero_idx = None
        self.mem_dict = dict()
        self.output_array = []
        self.outputs = []
        self.cond = self.helper.create_variable_for_type_inference(dtype='bool')
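        # The RNN is lowered to a While loop; self.cond (step_idx < max_seq_len,
        # set up later by step_input) is the loop condition.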
        self.cond.stop_gradient = False
        self.while_op = While(self.cond)
        self.input_array = []
        self.mem_link = []

    def step_input(self, x, level=0):
        r"""
        This function is used to set sequence x as DynamicRNN's input.
        The maximum sequence length in x determines the number of time steps
        the RNN unit will be executed. DynamicRNN can take multiple inputs.
        When all inputs' :code:`lod_level` are 1, all inputs should hold the
        same LoD. When :code:`x.lod_level >= 2` , the input sequence will be
        unfold along specified level, and the slice of each time step is a
        LoDTensor whose lod_level is :code:`x.lod_level - level - 1` .
        In this case, the specified LoD level of multiple inputs should be the same.

        - Case 1:

        .. code-block:: text

            # input, where Si is slice data of shape [1, N]
            level = 0
            x.lod = [[2, 1, 3]]
            x.shape = [6, N]
            x.data = [[S0],
                      [S0],
                      [S1],
                      [S2],
                      [S2],
                      [S2]]

            # output
            # step 0, time step data of 3 sequences
            out.lod = [[]]
            out.shape = [3, N]
            out.data = [[S2],
                        [S0],
                        [S1]]

            # step 1, time step data of 2 sequences
            out.lod = [[]]
            out.shape = [2, N]
            out.data = [[S2],
                        [S0]]

            # step 2, time step data of 1 sequences
            out.lod = [[]]
            out.shape = [1, N]
            out.data = [[S2]]


        Args:
            x (Variable): The input LoDTensor which holds information of a
                minibatch of variable-length sequences and should meet :code:`x.lod_level >= 1` .
                When RNN has multiple inputs, the first dimension should match
                across all inputs, but other shape components may differ.
                Optional data types are: bool, float16, float32, float64, int8, int16, int32, int64, uint8.
            level (int, optional): The level of lod used to split steps.
                It should be in range :math:`[0, x.lod\_level)` . The default value is 0.

        Returns:
            Variable: The current time step in the input sequence. If there are :code:`num_sequences` \
                sequences in x whose length is larger than :code:`step_idx` , the returned Variable \
                will only hold the :code:`step_idx` -th time step of those `num_sequences` sequences. \
                The data type is the same as input. If :code:`x.lod_level == 1` , the return value is \
                a Tensor of shape :math:`\{num\_sequences, x.shape[1], ...\}` , or it will \
                be a variable-length LoDTensor.

        Raises:
            ValueError: When :code:`step_input()` is called outside :code:`block()` .
            TypeError: When x is not a Variable.

        Examples:
            ..  code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 1], dtype='int64', lod_level=1)
                embedding = fluid.layers.embedding(input=sentence, size=[65536, 32], is_sparse=True)

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    # Set embedding as RNN's input, each time step processes a word from the sentence
                    word = drnn.step_input(embedding)
                    # Initialize memory to a Tensor whose value is 0, shape=[batch_size, 200],
                    # where batch_size is the number of sequences in embedding.
                    memory = drnn.memory(shape=[200])
                    hidden = fluid.layers.fc(input=[word, memory], size=200, act='relu')
                    # Update memory to hidden
                    drnn.update_memory(ex_mem=memory, new_mem=hidden)
                    # Set hidden as RNN's output
                    drnn.output(hidden)

                # Get RNN's result
                rnn_output = drnn()
        """
        self._assert_in_rnn_block_("step_input")
        check_type(x, 'x', Variable, 'fluid.layers.DynamicRNN.step_input()')
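        # The first step_input call builds the shared bookkeeping: a
        # LoDRankTable sorting sequences by length in descending order, the
        # max sequence length, and the while-loop condition
        # step_idx < max_seq_len.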
        parent_block = self._parent_block_()
        if self.lod_rank_table is None:
            self.lod_rank_table = parent_block.create_var(
                name=unique_name.generate('lod_rank_table'),
                type=core.VarDesc.VarType.LOD_RANK_TABLE,
            )
            self.lod_rank_table.stop_gradient = True
            parent_block.append_op(
                type='lod_rank_table',
                inputs={"X": x},
                outputs={"Out": self.lod_rank_table},
                attrs={"level": level},
            )
            self.max_seq_len = parent_block.create_var(
                name=unique_name.generate('dynamic_rnn_max_seq_len'),
                dtype='int64',
            )
            self.max_seq_len.stop_gradient = False
            parent_block.append_op(
                type='max_sequence_len',
                inputs={'RankTable': self.lod_rank_table},
                outputs={"Out": self.max_seq_len},
            )
            self.cond.stop_gradient = True
            parent_block.append_op(
                type='less_than',
                inputs={'X': self.step_idx, 'Y': self.max_seq_len},
                outputs={'Out': self.cond},
                attrs={'force_cpu': True},
            )

        input_array = parent_block.create_var(
            name=unique_name.generate('dynamic_rnn_input_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype,
        )
        self.input_array.append((input_array, x.dtype))
        parent_block.append_op(
            type='lod_tensor_to_array',
            inputs={'X': x, 'RankTable': self.lod_rank_table},
            outputs={'Out': input_array},
        )
        return array_read(array=input_array, i=self.step_idx)

    def static_input(self, x):
        r"""
        This function is used to set x as DynamicRNN's static input. It is optional.

        - Case 1, set static input with LoD

        .. code-block:: text

            # RNN's input is the same as the case listed in step_input
            # static input, where Si is slice data of shape [1, M]
            x.lod = [[3, 1, 2]]
            x.shape = [6, M]
            x.data = [[S0],
                      [S0],
                      [S0],
                      [S1],
                      [S2],
                      [S2]]

            # step 0, batch data corresponding to the 3 input sequences
            out.lod = [[2, 3, 1]]
            out.shape = [6, M]
            out.data = [[S2],
                        [S2],
                        [S0],
                        [S0],
                        [S0],
                        [S1]]

            # step 1, batch data corresponding to the 2 input sequences
            out.lod = [[2, 3]]
            out.shape = [5, M]
            out.data = [[S2],
                        [S2],
                        [S0],
                        [S0],
                        [S0]]

            # step 2, batch data corresponding to the 1 input sequence
            out.lod = [[2]]
            out.shape = [2, M]
            out.data = [[S2],
                        [S2]]


        - Case 2, set static input without LoD

        .. code-block:: text

            # RNN's input is the same as the case listed in step_input
            # static input, where Si is slice data of shape [1, M]
            x.lod = [[]]
            x.shape = [3, M]
            x.data = [[S0],
                      [S1],
                      [S2]]

            # step 0, batch data corresponding to the 3 input sequences
            out.lod = [[]]
            out.shape = [3, M]
            out.data = [[S2],
                        [S0],
                        [S1]]

            # step 1, batch data corresponding to the 2 input sequences
            out.lod = [[]]
            out.shape = [2, M]
            out.data = [[S2],
                        [S0]]

            # step 2, batch data corresponding to the 1 input sequence
            out.lod = [[]]
            out.shape = [1, M]
            out.data = [[S2]]


        Args:
            x (Variable): The static input LoDTensor which should hold the same number of sequences
                as RNN's input (the input LoDTensor set by :code:`step_input()` ). If the LoD is None,
                the input x will be treated as a minibatch with :code:`x.shape[0]` sequences of length 1.
                Optional data types are: bool, float16, float32, float64, int8, int16, int32, int64, uint8.

        Returns:
            Variable: The input LoDTensor after being sorted and shrunk. If there are :code:`num_sequences` \
                sequences in RNN's input LoDTensor whose length is larger than :code:`step_idx` , \
                the static input Tensor will be sorted to the same order as RNN's input and \
                will only retain data corresponding to those :code:`num_sequences` sequences. \
                The data type is the same as input. If :code:`x.lod == None` , the return value is \
                a Tensor of shape :math:`\{num\_sequences, x.shape[1], ...\}` , otherwise it will \
                be a variable-length LoDTensor.

        Raises:
            ValueError: When :code:`static_input()` is called outside :code:`block()` .
            TypeError: When x is not a Variable.
            RuntimeError: When :code:`static_input()` is called before :code:`step_input()` .

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
                encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1)
                decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32')

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    # Set sentence as RNN's input, each time step processes a word from the sentence
                    current_word = drnn.step_input(sentence)
                    # Set encode_proj as RNN's static input
                    encoder_word = drnn.static_input(encoder_proj)
                    # Initialize memory with boot_memory, which needs to be reordered according to RNN's input sequences
                    memory = drnn.memory(init=decoder_boot, need_reorder=True)
                    fc_1 = fluid.layers.fc(input=encoder_word, size=30)
                    fc_2 = fluid.layers.fc(input=current_word, size=30)
                    decoder_inputs = fc_1 + fc_2
                    hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30)
                    # Update memory with hidden
                    drnn.update_memory(ex_mem=memory, new_mem=hidden)
                    out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax')
                    # Set out as RNN's output
                    drnn.output(out)

                # Get RNN's result
                rnn_output = drnn()
        """
        self._assert_in_rnn_block_("static_input")
        check_type(x, 'x', Variable, 'fluid.layers.DynamicRNN.static_input()')
        if self.lod_rank_table is None:
            raise RuntimeError(
                "static_input() must be called after step_input()."
            )
        parent_block = self._parent_block_()
        x_reordered = parent_block.create_var(
            name=unique_name.generate("dynamic_rnn_static_input_reordered"),
            type=core.VarDesc.VarType.LOD_TENSOR,
            dtype=x.dtype,
        )
        # Reorder x to match the sequence order established by step_input(),
        # then shrink it to the batch still active at the current step.
        parent_block.append_op(
            type='reorder_lod_tensor_by_rank',
            inputs={'X': [x], 'RankTable': [self.lod_rank_table]},
            outputs={'Out': [x_reordered]},
        )
        return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)

    @signature_safe_contextmanager
    def block(self):
        """
        The function is used to define the operations executed during
        each time step of the RNN. The operation list will be executed :code:`max_sequence_len`
        times (where :code:`max_sequence_len` is the maximum length of RNN's input sequences).

        Raises:
            ValueError: When :code:`block()` is called multiple times.
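
        Examples:
            A minimal sketch of one RNN step defined inside :code:`block()`
            (mirroring the :code:`memory` examples below; the input layout is
            illustrative):

            .. code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    # Operations defined here run once per time step.
                    word = drnn.step_input(sentence)
                    memory = drnn.memory(shape=[10], dtype='float32', value=0)
                    hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
                    drnn.update_memory(ex_mem=memory, new_mem=hidden)
                    drnn.output(hidden)

                rnn_output = drnn()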
        """
        if self.status != DynamicRNN.BEFORE_RNN:
            raise ValueError("rnn.block() can only be invoke once")
        self.step_idx = fill_constant(
            shape=[1], dtype='int64', value=0, force_cpu=True
        )
        self.step_idx.stop_gradient = False
        self.status = DynamicRNN.IN_RNN
        with self.while_op.block():
            yield
            increment(x=self.step_idx, value=1.0, in_place=True)

            for new_mem, mem_array in self.mem_link:
                array_write(x=new_mem, i=self.step_idx, array=mem_array)

            # Re-evaluate the loop condition so the while_op stops once
            # step_idx reaches max_seq_len.
            less_than(
                x=self.step_idx,
                y=self.max_seq_len,
                force_cpu=True,
                cond=self.cond,
            )

        self.status = DynamicRNN.AFTER_RNN
        for each_array in self.output_array:
            self.outputs.append(
                array_to_lod_tensor(x=each_array, table=self.lod_rank_table)
            )

    def __call__(self, *args, **kwargs):
        """
        This function is used to get the output sequences of DynamicRNN.

        Args:
            None

        Returns:
            Variable or Variable list: RNN's output sequences.

        Raises:
            ValueError: When :code:`__call__()` is called before :code:`block()` .
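
        Examples:
            A minimal sketch (the input definition is illustrative):

            .. code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    word = drnn.step_input(sentence)
                    drnn.output(word)

                # Called outside block(); one marked output yields a single Variable.
                rnn_output = drnn()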
        """
        if self.status != DynamicRNN.AFTER_RNN:
            raise ValueError(
                (
                    "Output of the dynamic RNN can only be visited "
                    "outside the rnn block."
                )
            )
        if len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs

    def memory(
        self,
        init=None,
        shape=None,
        value=0.0,
        need_reorder=False,
        dtype='float32',
    ):
        r"""
        Create a memory Variable for DynamicRNN to deliver data across time steps.
        It can be initialized by an existing Tensor or a constant Tensor of given
        dtype and shape.
        Args:
            init (Variable, optional): LoDTensor used to initialize the memory.
                If init is not None, it should hold the same number of sequences
                as RNN's input (the input LoDTensor set by :code:`step_input()` )
                and the memory will be initialized to it. If init's LoD is None,
                it will be treated as a minibatch with :code:`init.shape[0]` sequences
                of length 1. The default value is None.
            shape (list|tuple, optional): When init is None, it is used to specify
                the memory's shape. Note that the shape does not include the batch_size.
                If setting shape to :math:`\{D_1, D_2, ...\}` , the shape of memory Tensor
                will be :math:`\{batch\_size, D_1, D_2, ...\}` , where batch_size is
                determined by RNN's input sequences. The default value is None.
            value (float, optional): When init is None, it is used as the initialized value
                of memory. The default value is 0.0.
            need_reorder (bool, optional): When init is not None, it determines whether
                the memory needs to be reordered like the RNN's input sequences. It should be
                set to True when the initialized memory depends on the order of input samples.
                The default value is False.
            dtype (str|numpy.dtype, optional): When init is None, it is used to set the
                data type of memory. The default value is "float32". Optional data types
                are: "float32", "float64", "int32", "int64".

        Returns:
            Variable: The memory LoDTensor after being shrunk. If there are :code:`num_sequences` \
                sequences in RNN's input LoDTensor whose length is larger than :code:`step_idx` , \
                the memory Tensor also needs to be shrunk and will only retain data \
                corresponding to those :code:`num_sequences` sequences.

        Raises:
            ValueError: When :code:`memory()` is called outside :code:`block()` .
            TypeError: When init is set and is not a Variable.
            ValueError: When :code:`memory()` is called before :code:`step_input()` .
        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
                boot_memory = fluid.data(name='boot', shape=[None, 10], dtype='float32')

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    # Set sentence as RNN's input, each time step processes a word from the sentence
                    word = drnn.step_input(sentence)
                    # Initialize memory with boot_memory, which needs to be reordered according to RNN's input sequences
                    memory = drnn.memory(init=boot_memory, need_reorder=True)
                    hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
                    # Update memory with hidden
                    drnn.update_memory(ex_mem=memory, new_mem=hidden)
                    # Set hidden as RNN's output
                    drnn.output(hidden)
                # Get RNN's result
                rnn_output = drnn()


        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    # Set sentence as RNN's input, each time step processes a word from the sentence
                    word = drnn.step_input(sentence)
                    # Initialize memory to a Tensor whose value is 0, shape=[batch_size, 10],
                    # where batch_size is the number of sequences in sentence.
                    memory = drnn.memory(shape=[10], dtype='float32', value=0)
                    hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
                    # Update memory with hidden
                    drnn.update_memory(ex_mem=memory, new_mem=hidden)
                    # Set hidden as RNN's output
                    drnn.output(hidden)

                # Get RNN's result
                rnn_output = drnn()
        """
        self._assert_in_rnn_block_('memory')
        self._init_zero_idx_()
        if shape is not None:
            check_type(
                shape,
                'shape',
                (list, tuple),
                'fluid.layers.DynamicRNN.memory()',
            )
        if init is not None:
            check_type(
                init, 'init', Variable, 'fluid.layers.DynamicRNN.memory()'
            )
            parent_block = self._parent_block_()
            init_tensor = init
            if need_reorder:
                if self.lod_rank_table is None:
                    raise ValueError(
                        'If need_reorder is set to True, make sure step_input '
                        'is invoked before '
                        'memory(init=init, need_reorder=True, ...).'
                    )
                init_reordered = parent_block.create_var(
                    name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
                    type=core.VarDesc.VarType.LOD_TENSOR,
                    dtype=init.dtype,
                )
                parent_block.append_op(
                    type='reorder_lod_tensor_by_rank',
                    inputs={
                        'X': [init_tensor],
                        'RankTable': [self.lod_rank_table],
                    },
                    outputs={'Out': [init_reordered]},
                )
                init_tensor = init_reordered
            mem_array = parent_block.create_var(
                name=unique_name.generate('dynamic_rnn_mem_array'),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=init.dtype,
            )
            parent_block.append_op(
                type='write_to_array',
                inputs={'X': init_tensor, 'I': self.zero_idx},
                outputs={'Out': mem_array},
            )
            retv = array_read(array=mem_array, i=self.step_idx)
            # Shrink the memory to the sequences still active at this step.
            retv = shrink_memory(
                x=retv, i=self.step_idx, table=self.lod_rank_table
            )
            self.mem_dict[retv.name] = mem_array
            return retv
        else:
            if len(self.input_array) == 0:
                raise ValueError(
                    "step_input should be invoked before memory(shape=..., value=...)"
                )
            parent_block = self._parent_block_()
            init = parent_block.create_var(
                name=unique_name.generate('mem_init'), dtype=dtype
            )
            arr, dtype = self.input_array[0]
            # Read the first step input as a batch-size reference for
            # fill_constant_batch_size_like below.
            in0 = parent_block.create_var(
                name=unique_name.generate('in0'), dtype=dtype
            )
            parent_block.append_op(
                type='read_from_array',
                inputs={'X': [arr], 'I': [self.zero_idx]},
                outputs={'Out': [in0]},
            )
            parent_block.append_op(
                type='fill_constant_batch_size_like',
                inputs={'Input': [in0]},
                outputs={'Out': [init]},
                attrs={
                    'shape': [-1] + shape,
                    'value': float(value),
                    'dtype': init.dtype,
                },
            )
            return self.memory(init=init)

    def update_memory(self, ex_mem, new_mem):
        """
        Update the memory which needs to be delivered across time steps.

        Args:
            ex_mem (Variable): The memory data of previous time step.
            new_mem (Variable): The new memory data produced in current time step.
                The shape and data type of ex_mem and new_mem should be the same.

        Returns:
            None

        Raises:
            ValueError: When :code:`update_memory()` is called outside :code:`block()` .
            TypeError: When :code:`ex_mem` or :code:`new_mem` is not a Variable.
            ValueError: When :code:`ex_mem` is not defined by :code:`memory()` .
            ValueError: When :code:`update_memory()` is called before :code:`step_input()` .
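
        Examples:
            A minimal sketch showing the memory/update pairing (see the
            :code:`memory` examples above for a complete network):

            .. code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    word = drnn.step_input(sentence)
                    memory = drnn.memory(shape=[10], dtype='float32', value=0)
                    hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
                    # hidden becomes the memory value seen at the next time step
                    drnn.update_memory(ex_mem=memory, new_mem=hidden)
                    drnn.output(hidden)

                rnn_output = drnn()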
        """
        self._assert_in_rnn_block_('update_memory')
        check_type(
            ex_mem,
            'ex_mem',
            Variable,
            'fluid.layers.DynamicRNN.update_memory()',
        )
        check_type(
            new_mem,
            'new_mem',
            Variable,
            'fluid.layers.DynamicRNN.update_memory()',
        )

        mem_array = self.mem_dict.get(ex_mem.name, None)
        if mem_array is None:
            raise ValueError("Please invoke memory before update_memory")
        if self.lod_rank_table is None:
            raise ValueError("Please invoke step_input before update_memory")

        self.mem_link.append((new_mem, mem_array))

    def output(self, *outputs):
        """
        This function is used to set :code:`outputs` as RNN's output.

        Args:
            *outputs (Variable ...): The output Tensor. DynamicRNN can mark multiple
                Variables as its output.

        Returns:
            None

        Raises:
            ValueError: When :code:`output()` is called outside :code:`block()` .
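
        Examples:
            A minimal sketch marking two Variables as outputs (names are
            illustrative):

            .. code-block:: python

                import paddle.fluid as fluid

                sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)

                drnn = fluid.layers.DynamicRNN()
                with drnn.block():
                    word = drnn.step_input(sentence)
                    hidden = fluid.layers.fc(input=word, size=10, act='tanh')
                    projection = fluid.layers.fc(input=hidden, size=5)
                    # Mark both hidden and projection as RNN outputs
                    drnn.output(hidden, projection)

                # Two marked outputs yield a list of two Variables.
                hidden_seq, proj_seq = drnn()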
        """
        self._assert_in_rnn_block_('output')
        parent_block = self._parent_block_()
        for each in outputs:
            check_type(
                each, "outputs", Variable, "fluid.layers.DynamicRNN.output"
            )
            outside_array = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key(
                    "_".join([self.helper.name, "output_array", each.name])
                ),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=each.dtype,
            )
            array_write(x=each, i=self.step_idx, array=outside_array)
            self.output_array.append(outside_array)

    def _init_zero_idx_(self):
        # Lazily create an int64 scalar 0 on CPU, used as the array index
        # when writing the initial value into each memory array.
        if self.zero_idx is None:
            parent_block = self._parent_block_()
            self.zero_idx = parent_block.create_var(
                name=unique_name.generate('zero_idx'), dtype='int64'
            )
            parent_block.append_op(
                type='fill_constant',
                inputs={},
                outputs={'Out': [self.zero_idx]},
                attrs={
                    'shape': [1],
                    'dtype': self.zero_idx.dtype,
                    'value': float(0),
                    'force_cpu': True,
                },
            )
    def _parent_block_(self):
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)

        return parent_block

    def _assert_in_rnn_block_(self, method):
        if self.status != DynamicRNN.IN_RNN:
            raise ValueError(
                "{0} can only be invoked inside rnn block.".format(method)
            )


def switch_case(branch_index, branch_fns, default=None, name=None):
    '''
    :api_attr: Static Graph

    This operator is like a C++ switch/case statement.

    Args:
        branch_index(Tensor): A Tensor with shape [1] to specify which branch to execute. The data type is ``int32``, ``int64`` or ``uint8``.
        branch_fns(dict|list|tuple): If it's a list or tuple, the elements in it could be pairs of (int, callable) or simple callables whose actual index will be used as the index of callable. If it's a dict, its key is a python integer and the value is a callable. All callables return the same structure of Tensors.
        default(callable, optional): Callable that returns a structure of Tensors.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor|list(Tensor): Tensors returned by the callable specified by ``branch_index`` in ``branch_fns``,
        or Tensors returned by ``default`` if ``default`` is not None and no index matches in ``branch_fns``,
        or Tensors returned by the callable with the max index in ``branch_fns`` if ``default`` is None and no index matches in ``branch_fns``.

    Raises:
        TypeError: If the type of ``branch_index`` is not Tensor.
        TypeError: If the data type of ``branch_index`` is not ``int32``, ``int64`` or ``uint8``.
        TypeError: If the type of ``branch_fns`` is not dict, list or tuple.
        TypeError: If the elements of ``branch_fns`` are not 2-tuples.
        TypeError: If the first element of a 2-tuple in ``branch_fns`` is not an integer.
        ValueError: If the first element of 2-tuple in ``branch_fns`` is not unique.
        TypeError: If the second element of 2-tuple in ``branch_fns`` is not callable.
        TypeError: If ``default`` is not None but it is not callable.

    Examples:
        .. code-block:: python

            import paddle

            paddle.enable_static()
            def fn_1():
                return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)

            def fn_2():
                return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)

            def fn_3():
                return paddle.full(shape=[3], dtype='int32', fill_value=3)

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()
            with paddle.static.program_guard(main_program, startup_program):
                index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
                index_2 = paddle.full(shape=[1], dtype='int32', fill_value=2)
                out_1 = paddle.static.nn.switch_case(
                    branch_index=index_1,
                    branch_fns={1: fn_1, 2: fn_2},
                    default=fn_3)

                out_2 = paddle.static.nn.switch_case(
                    branch_index=index_2,
                    branch_fns=[(1, fn_1), (2, fn_2)],
                    default=fn_3)

                # Argument default is None and no index matches. fn_3 will be called because of the max index 7.
                out_3 = paddle.static.nn.switch_case(
                    branch_index=index_2,
                    branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)])

                exe = paddle.static.Executor(paddle.CPUPlace())
                res_1, res_2, res_3 = exe.run(main_program, fetch_list=[out_1, out_2, out_3])
                print(res_1)  # [[1. 1.]]
                print(res_2)  # [[2 2] [2 2]]
                print(res_3)  # [3 3 3]
    '''
    helper = LayerHelper('switch_case', **locals())

    def _check_args(branch_index, branch_fns, default):

        check_variable_and_dtype(
            branch_index,
            'branch_index',
            ['uint8', 'int32', 'int64'],
            'switch_case',
        )

        if convert_dtype(branch_index.dtype) != "int64":
            branch_index = cast(branch_index, "int64")

        check_type(branch_fns, 'branch_fns', (list, tuple, dict), 'switch_case')

        branch_fns = (
            branch_fns.items() if isinstance(branch_fns, dict) else branch_fns
        )

        branch_fns = (
            list(enumerate(branch_fns))
            if all(callable(fn) for fn in branch_fns)
            else branch_fns
        )

        keys_of_fns = []
        for index_fn_pair in branch_fns:
            if not isinstance(index_fn_pair, tuple):
                raise TypeError(
                    _error_message(
                        "The elements' type",
                        "branch_fns",
                        "switch_case",
                        tuple,
                        type(index_fn_pair),
                    )
                )

            if len(index_fn_pair) != 2:
                raise TypeError(
                    _error_message(
                        "The tuple's size",
                        "branch_fns",
                        "switch_case",
                        "2",
                        str(len(index_fn_pair)) + "-tuple",
                    )
                )

            key, fn = index_fn_pair

            if not isinstance(key, int):
                raise TypeError(
                    _error_message(
                        "The key's type",
                        "branch_fns",
                        "switch_case",
                        int,
                        type(key),
                    )
                )

            if key in keys_of_fns:
                raise ValueError(
                    "The key in 'branch_fns' must be unique, but '{}' appears more than once.".format(
                        key
                    )
                )
            else:
                keys_of_fns.append(key)

            if not callable(fn):
                raise TypeError(
                    _error_message(
                        "The type of function for key {}".format(key),
4400 4401 4402 4403 4404 4405
                        "branch_fns",
                        "switch_case",
                        "callable",
                        type(fn),
                    )
                )

        if default is None:
            default = sorted(branch_fns)[-1][1]
            branch_fns = sorted(branch_fns)[:-1]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")

        pred_fn_pairs = []
        for index, fn in branch_fns:
            new_index = fill_constant(shape=[1], dtype="int64", value=index)
            pred = equal(branch_index, new_index)
            pred_fn_pairs.append((pred, fn))

        return pred_fn_pairs, default

    pred_fn_pairs, default = _check_args(branch_index, branch_fns, default)
    # Lower the switch into nested conds: start from the default callable and
    # wrap it once per (pred, fn) pair, so calling the result executes the
    # single matching branch (keys are unique) or falls back to the default.
    false_fn = default
    for pred, true_fn in pred_fn_pairs:
        false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)

    final_fn = false_fn
    return final_fn()


@templatedoc()
def reorder_lod_tensor_by_rank(x, rank_table):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}.
        rank_table(${rank_table_type}): ${rank_table_comment}.
    Returns:
        out(${out_type}): ${out_comment}.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data_desc = (['input', [9], 0], ['ref', [5], 1])
          data = fluid.layers.data(name=data_desc[0][0], shape=data_desc[0][1])
          rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1])
          table = fluid.layers.control_flow.lod_rank_table(rank_data)
          new_data = fluid.layers.reorder_lod_tensor_by_rank(
                           x=data, rank_table=table)

    """

    check_type(x, 'x', (Variable), 'reorder_lod_tensor_by_rank')
    check_type(
        rank_table, 'rank_table', (Variable), 'reorder_lod_tensor_by_rank'
    )
    if rank_table.type != core.VarDesc.VarType.LOD_RANK_TABLE:
        raise TypeError("The type of rank_table should be LOD_RANK_TABLE.")

    helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reorder_lod_tensor_by_rank',
        inputs={'X': [x], 'RankTable': [rank_table]},
        outputs={'Out': [out]},
    )
    return out


def is_empty(x, name=None):
    """

    Test whether a Tensor is empty.

    Args:
        x (Tensor): The Tensor to be tested.
        name (str, optional): The default value is ``None`` . Normally users
                            don't have to set this parameter. For more information,
                            please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A bool scalar Tensor. True if 'x' is an empty Tensor.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand(shape=[4, 32, 32], dtype='float32')
            res = paddle.is_empty(x=input)
            print("res:", res)
            # ('res:', Tensor: eager_tmp_1
            #    - place: CPUPlace
            #    - shape: [1]
            #    - layout: NCHW
            #    - dtype: bool
            #    - data: [0])
    """
    if in_dygraph_mode():
        return _C_ops.is_empty(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.is_empty(x)
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
    )
    check_type(name, "name", (str, type(None)), "is_empty")

    helper = LayerHelper("is_empty", **locals())
    cond = helper.create_variable_for_type_inference(dtype='bool')
    cond.stop_gradient = True
    helper.append_op(
        type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]}
    )
    return cond