# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..wrapped_decorator import signature_safe_contextmanager

from .layer_function_generator import templatedoc
from .tensor import assign, cast, fill_constant
from .. import core
from ..framework import (
    Program,
    Variable,
    Operator,
    _non_static_mode,
    static_only,
    _in_legacy_dygraph,
    in_dygraph_mode,
)
from ..layer_helper import LayerHelper, unique_name
from .utils import (
    assert_same_structure,
    map_structure,
    hold_mutable_vars,
    copy_mutable_vars,
    padding_to_same_structure,
    is_sequence,
    pack_sequence_as,
    flatten,
    to_sequence,
)
import numpy
import warnings
from functools import reduce, partial
from ..data_feeder import (
    convert_dtype,
    check_variable_and_dtype,
    check_type,
    check_dtype,
)
from ..backward import _infer_var_data_type_shape_
import paddle
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'While',
    'Switch',
    'increment',
    'array_write',
    'array_read',
    'cond',
    'IfElse',
    'StaticRNN',
    'reorder_lod_tensor_by_rank',
    'Print',
    'Assert',
    'is_empty',
    'case',
    'switch_case',
    'while_loop',
]


def select_output(input, outputs, mask):
    """
    **select_output**
    This API takes in one input, multiple outputs, and an integer mask. It
    selects the output specified by the mask and copies the input to the
    selected output. It is useful in control flow.

    Args:
        input(Variable): The input variable
        outputs(tuple|list): The output variables
        mask(Variable): A tensor containing 1 integer number selecting which
            output to be copied with input

    Returns:
        Variable: The output variables
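
    Examples:
        .. code-block:: python

            # A minimal construction-only sketch (not from the original docs).
            # select_output is mainly used internally by control flow; here
            # ``mask`` holds the integer 1, so at runtime ``x`` would be
            # copied into ``outs[1]``.
            import paddle
            paddle.enable_static()
            import paddle.fluid as fluid
            from paddle.fluid.layers.control_flow import select_output

            x = fluid.layers.fill_constant(shape=[1], dtype='float32', value=3.0)
            mask = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
            outs = [
                fluid.layers.create_tensor(dtype='float32'),
                fluid.layers.create_tensor(dtype='float32'),
            ]
            outs = select_output(x, outs, mask)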
    """
    helper = LayerHelper('select_output', **locals())
    check_type(input, 'input', (Variable), 'select_output')
    check_variable_and_dtype(mask, 'mask', ['int32'], 'select_output')
    check_type(outputs, 'outputs', (list, tuple), 'select_output')

    helper.append_op(
        type='select_output',
        inputs={'X': input, 'Mask': mask},
        outputs={'Out': outputs},
    )
    return outputs


def _select_input_infer_shape(first_shape, second_shape):
    """
    This function infers the output shape by the following algorithm:
    1. If the ranks differ, warn; for compatibility with non-declarative
       mode, we just return second_shape.
    2. Compare the axes one by one:
        if a == b: we set the axis to a
        if a != b: we set the axis to -1
    """
    if len(first_shape) != len(second_shape):
        warnings.warn(
            f"the input shapes of select_input should have the same rank, but get {first_shape}, {second_shape}"
        )
        return second_shape
    out_shape = list(
        map(lambda a, b: a if a == b else -1, first_shape, second_shape)
    )
    return out_shape


def select_input(inputs, mask):
    """
    **select_input**

    This API takes in multiple inputs and uses an integer mask to select one
    input to output. It is useful in control flow.

    Args:
        inputs(tuple|list): The input variables
        mask(Variable): A tensor containing 1 integer number selecting which
            input to output

    Returns:
        Variable: The selected input variable
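
    Examples:
        .. code-block:: python

            # A minimal construction-only sketch (not from the original docs).
            # select_input is mainly used internally by control flow; here
            # ``mask`` holds the integer 1, so at runtime the second input
            # ``y`` would be selected.
            import paddle
            paddle.enable_static()
            import paddle.fluid as fluid
            from paddle.fluid.layers.control_flow import select_input

            x = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
            y = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
            mask = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
            out = select_input([x, y], mask)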
    """
    helper = LayerHelper('select_input', **locals())
    check_type(inputs, 'inputs', (list, tuple), 'select_input')
    check_variable_and_dtype(mask, 'mask', ['int32'], 'select_input')

    # select_input should expand the output shape: if one dim is -1 and the other is a valid number, use -1 first. If the ranks differ, a warning is issued and the second shape is used.
    # assert inputs[0].dtype == inputs[1].dtype, f"Expect the inputs should have the same dtype, but get {inputs[0].dtype} and {inputs[1].dtype}"
    output_shape = _select_input_infer_shape(inputs[0].shape, inputs[1].shape)
    output_dtype = inputs[1].dtype
    output_type = inputs[1].type

    out = helper.create_variable(
        dtype=output_dtype, shape=output_shape, type=output_type
    )
    helper.append_op(
        type='select_input',
        inputs={'X': inputs, 'Mask': mask},
        outputs={'Out': out},
    )
    return out


def select_input_with_buildin_type(inputs, mask, name):
    from paddle.jit.dy2static.variable_trans_func import (
        to_static_variable,
    )
    from paddle.jit.dy2static.utils import UndefinedVar

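    # NOTE: the two branches of a dy2static ``cond`` may return Variables,
    # Python builtins (bool/float/int) or UndefinedVar. Builtins are promoted
    # to Variables via ``to_static_variable`` below, so that the runtime
    # ``select_input`` dispatch on ``mask`` always sees Variables.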
    false_var, true_var = inputs

    if isinstance(false_var, UndefinedVar) and isinstance(
        true_var, UndefinedVar
    ):
        """None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None."""
        return None

    if isinstance(false_var, Variable) and isinstance(true_var, Variable):
        try:
            return select_input(inputs, mask)
        except Exception as e:
            raise RuntimeError(
                f"Exceptions throwed while doing select_input on {name}:\n{e}"
            )

    elif isinstance(false_var, support_ret_buildin_type) and isinstance(
        false_var, type(true_var)
    ):
        if false_var == true_var:
            return false_var
        else:
            inputs = [
                to_static_variable(false_var),
                to_static_variable(true_var),
            ]
    # Deal with the situations like this: false_var is int and true_var is Variable
    elif (
        isinstance(false_var, support_ret_buildin_type)
        and isinstance(true_var, Variable)
    ) or (
        isinstance(true_var, support_ret_buildin_type)
        and isinstance(false_var, Variable)
    ):
        inputs = [to_static_variable(false_var), to_static_variable(true_var)]
        warnings.warn(
            "Return results from different branches in cond are not same type: "
            "false_var returned by false_fn is '{}' and true_var of true_fn is "
            "'{}'".format(type(false_var), type(true_var))
        )
    elif (
        isinstance(false_var, UndefinedVar)
        and isinstance(true_var, (Variable,) + support_ret_buildin_type)
    ) or (
        isinstance(true_var, UndefinedVar)
        and isinstance(false_var, (Variable,) + support_ret_buildin_type)
    ):

        def create_var_if_not_undefined_var(a):
            if isinstance(a, UndefinedVar):
                return a
            return to_static_variable(a)

        true_var, false_var = to_static_variable(true_var), to_static_variable(
            false_var
        )
        inputs = [false_var, true_var]
    else:
        raise TypeError(
            "Unsupported return type of true_fn and false_fn in cond: false_var "
            "returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
                type(false_var), type(true_var)
            )
        )
    try:
        return select_input(inputs, mask)
    except Exception as e:
        raise RuntimeError(
            f"Exceptions throwed while doing select_input on {name}:\n{e}"
        )


def split_lod_tensor(input, mask, level=0):
    """
    This function takes in an input that contains the complete lod information,
    and takes in a mask which is used to mask certain parts of the input.
    The output is the true branch and the false branch with the mask applied to
    the input at a certain level in the tensor. Mainly used in IfElse to split
    data into two parts.

    Args:
        input(Variable|tuple|list|None): The input tensor that contains complete
                                lod information needed to construct the output.
        mask(Variable|list): A bool column vector which masks the input.
        level(int): The specific lod level to split.

    Returns:
        tuple(Variable, Variable):
        The true branch of tensor as per the mask applied to input.

        The false branch of tensor as per the mask applied to input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(name='x', shape=[1])
          x.persistable = True

          y = fluid.layers.data(name='y', shape=[1])
          y.persistable = True

          out_true, out_false = fluid.layers.split_lod_tensor(
                input=x, mask=y, level=0)

    """
    check_type(
        input,
        'input',
        (Variable, list, tuple, type(None)),
        'fluid.layers.split_lod_tensor',
    )
    check_type(mask, 'mask', (Variable, list), 'fluid.layers.split_lod_tensor')
    check_type(level, 'level', int, 'fluid.layers.split_lod_tensor')
    helper = LayerHelper('split_lod_tensor', **locals())
    out_true = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_false = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='split_lod_tensor',
        inputs={
            'X': input,
            'Mask': mask,
        },
        outputs={'OutTrue': out_true, 'OutFalse': out_false},
        attrs={'level': level},
    )
    return out_true, out_false


def merge_lod_tensor(in_true, in_false, x, mask, level=0):
    """
    **merge_lod_tensor**

    This function takes in an input :math:`x`, the True branch, the False
    branch and a binary :math:`mask`. Using this information, this function
    merges the True and False branches of the tensor into a single tensor as
    output at a certain lod level indicated by :math:`level`. Used in IfElse
    to merge the outputs of the True block and the False block.

    Args:
        in_true(Variable|tuple|list|None): The True branch to be merged.
        in_false(Variable|tuple|list|None): The False branch to be merged.
        x(Variable|tuple|list|None): The input tensor that contains complete
                            lod information needed to construct the output.
        mask(Variable|list): A bool column vector which masks the input.
        level(int): The specific lod level to merge.

    Returns:
        Variable: The merged output tensor.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(
                      name='x', shape=[1], dtype='float32', stop_gradient=False)
          y = fluid.layers.data(
                name='y', shape=[1], dtype='bool', stop_gradient=False)

          level = 0

          out_true, out_false = fluid.layers.split_lod_tensor(
                input=x, mask=y, level=level)
          out = fluid.layers.merge_lod_tensor(
                in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
    """
    helper = LayerHelper('merge_lod_tensor', **locals())
    check_type(
        x,
        'x',
        (Variable, list, tuple, type(None)),
        'fluid.layers.merge_lod_tensor',
    )
    check_type(mask, 'mask', (Variable, list), 'fluid.layers.merge_lod_tensor')
    check_type(
        in_true,
        'in_true',
        (Variable, list, tuple, type(None)),
        'fluid.layers.merge_lod_tensor',
    )
    check_type(
        in_false,
        'in_false',
        (Variable, list, tuple, type(None)),
        'fluid.layers.merge_lod_tensor',
    )
    out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
    helper.append_op(
        type='merge_lod_tensor',
        inputs={'X': x, 'Mask': mask, 'InTrue': in_true, 'InFalse': in_false},
        outputs={'Out': out},
        attrs={'level': level},
    )
    return out


@static_only
def Print(
    input,
    first_n=-1,
    message=None,
    summarize=20,
    print_tensor_name=True,
    print_tensor_type=True,
    print_tensor_shape=True,
    print_tensor_layout=True,
    print_tensor_lod=True,
    print_phase='both',
):
    '''
    :api_attr: Static Graph

    **Print operator**

    This creates a print op that will print when a tensor is accessed.

    Wraps the tensor passed in so that whenever the tensor is accessed,
    the message `message` is printed, along with the current value of the
    tensor `t`.

    Args:
        input (Variable): A Tensor to print.
        summarize (int): Number of elements in the tensor to be printed. If its
                value is -1, then all elements in the tensor will be printed.
        message (str): A string message to print as a prefix.
        first_n (int): Only log `first_n` number of times.
        print_tensor_name (bool, optional): Print the tensor name. Default: True.
        print_tensor_type (bool, optional): Print the tensor type. Default: True.
        print_tensor_shape (bool, optional): Print the tensor shape. Default: True.
        print_tensor_layout (bool, optional): Print the tensor layout. Default: True.
        print_tensor_lod (bool, optional): Print the tensor lod. Default: True.
        print_phase (str): Which phase to display, including 'forward',
                'backward' and 'both'. Default: 'both'. If set to 'backward', will
                only print the gradients of input tensor; If set to 'both', will
                both print the input tensor itself and the gradients of input tensor.

    Returns:
        Variable: Output tensor.

    NOTES:
        The input and output are two different variables. In the
        following process, you should use the output variable rather than the
        input; otherwise, the print layer will not have a backward pass.

    Examples:
        .. code-block:: python

           import paddle

           paddle.enable_static()

           x = paddle.full(shape=[2, 3], fill_value=3, dtype='int64')
           out = paddle.static.Print(x, message="The content of input layer:")

           main_program = paddle.static.default_main_program()
           exe = paddle.static.Executor(place=paddle.CPUPlace())
           res = exe.run(main_program, fetch_list=[out])
           # Variable: fill_constant_1.tmp_0
           #   - message: The content of input layer:
           #   - lod: {}
           #   - place: CPUPlace
           #   - shape: [2, 3]
           #   - layout: NCHW
           #   - dtype: long
           #   - data: [3 3 3 3 3 3]
    '''
    check_variable_and_dtype(
        input,
        'input',
        ['float32', 'float64', 'int32', 'int64', 'bool'],
        'fluid.layers.Print',
    )

    helper = LayerHelper('print' + "_" + input.name, **locals())
    output = helper.create_variable_for_type_inference(input.dtype)
    helper.append_op(
        type='print',
        inputs={'In': input},
        outputs={'Out': output},
        attrs={
            'first_n': first_n,
            'summarize': summarize,
            'message': message or "",
            'print_tensor_name': print_tensor_name,
            'print_tensor_type': print_tensor_type,
            'print_tensor_shape': print_tensor_shape,
            'print_tensor_layout': print_tensor_layout,
            'print_tensor_lod': print_tensor_lod,
            'print_phase': print_phase.upper(),
        },
    )
    return output


def Assert(cond, data=None, summarize=20, name=None):
    '''
    This API creates an op that asserts the given condition is true. If the
    condition is false, prints the tensors in data. ``summarize`` specifies the
    number of the elements in the tensors to print.

    Args:
        cond (Variable): The boolean condition tensor whose numel should be 1.
        data (list|tuple, optional): list or tuple of tensors to print when
            condition is not true. If it's ``None``, no tensor will be printed.
            The default value is ``None``.
        summarize (int, optional): Number of elements in the tensor to be
            printed. If its value is -1, then all elements in the tensor will
            be printed. The default value is 20.
        name (str, optional): The default value is ``None`` . Normally users
            don't have to set this parameter. For more information, please
            refer to :ref:`api_guide_Name` .

    Returns:
        Operator: the created operation.

    Raises:
        TypeError: If ``cond`` is not boolean Variable.
        TypeError: If ``data`` is not a list or tuple or ``None``.
        TypeError: If ``summarize`` is not int.
        TypeError: If ``name`` is not a string or ``None`` .
        fluid.core.EnforceNotMet: If the condition is False in running time.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
            condition = layers.reduce_max(x) < 1.0 # False
            layers.Assert(condition, [x], 10, "example_assert_layer")

            exe = fluid.Executor()
            try:
                exe.run(fluid.default_main_program())
                # Prints x and throws a paddle.fluid.core.EnforceNotMet exception
                # Example printed message for x:
                #
                # Variable: fill_constant_0.tmp_0
                #   - lod: {}
                #   - place: CPUPlace()
                #   - shape: [2, 3]
                #   - layout: NCHW
                #   - dtype: float
                #   - data: [2 2 2 2 2 2]
            except fluid.core.EnforceNotMet as e:
                print("Assert Exception Example")

    '''
    check_variable_and_dtype(cond, "cond", ["bool"], "fluid.layers.Assert")
    check_type(data, "data", (list, tuple, type(None)), "fluid.layers.Assert")
    check_type(summarize, "summarize", int, "fluid.layers.Assert")
    check_type(name, "name", (str, type(None)), "fluid.layers.Assert")

    layer_name = name if name else ('assert_' + cond.name)
    helper = LayerHelper(layer_name, **locals())

    op = helper.append_op(
        type="assert",
        inputs={"Cond": cond, "Data": [] if data is None else list(data)},
        attrs={"summarize": summarize},
    )

    return op


class BlockGuard:
    """
    BlockGuard class.

    BlockGuard class is used to create a sub-block in a program by
    using the Python `with` keyword.
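
    Examples:
        .. code-block:: python

            # A minimal sketch (BlockGuard is an internal utility, and this
            # example is not from the original docs): entering the guard
            # creates a new sub-block in the program, and leaving it rolls
            # back to the parent block.
            import paddle.fluid as fluid
            from paddle.fluid.layers.control_flow import BlockGuard

            program = fluid.Program()
            with BlockGuard(program):
                pass  # ops appended here would go into the new sub-block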
    """

    def __init__(self, main_program):
        if not isinstance(main_program, Program):
            raise TypeError("BlockGuard takes a program")
        self.main_program = main_program

    def __enter__(self):
        self.main_program._create_block()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.main_program._rollback()
        if exc_type is not None:
            return False  # re-raise exception
        return True


class BlockGuardWithCompletion(BlockGuard):
    """
    BlockGuardWithCompletion class.

    BlockGuardWithCompletion class is used to create an op with a block in a program.
    """

    def __init__(self, rnn):
        if not isinstance(rnn, StaticRNN):
            raise TypeError("BlockGuardWithCompletion takes a StaticRNN")
        super().__init__(rnn.helper.main_program)
        self.rnn = rnn

    def __enter__(self):
        self.rnn.status = StaticRNN.IN_RNN_BLOCK
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            return False
        self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
        self.rnn._complete_op()
        return super().__exit__(exc_type, exc_val, exc_tb)


class StaticRNNMemoryLink:
    """
    StaticRNNMemoryLink class.

    StaticRNNMemoryLink class is used to create a link between two
    memory cells of a StaticRNN.


    NOTE: This is an internal data structure of a very low-level API.
    Please use StaticRNN instead.

    Args:
        init(Variable): the initial variable for Memory.
        pre_mem(Variable): the memory variable in previous time step.
        mem(Variable): the memory variable in current time step.
    """

    def __init__(self, init, pre_mem, mem=None):
        self.init = init
        self.pre_mem = pre_mem
        self.mem = mem


class StaticRNN:
    """
    :api_attr: Static Graph

    StaticRNN class.

    The StaticRNN can process a batch of sequence data. The first dimension of inputs
    represents sequence length, the length of each input sequence must be equal.
    StaticRNN will unfold sequence into time steps, user needs to define how to process
    each time step during the :code:`with` step.

    Args:
        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            vocab_size, hidden_size=10000, 200
            paddle.enable_static()
            x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
            # create word sequence
            x_emb = layers.embedding(
                input=x,
                size=[vocab_size, hidden_size],
                dtype='float32',
                is_sparse=False)
            # transform batch size to dim 1
            x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

            rnn = fluid.layers.StaticRNN()
            with rnn.step():
                # mark created x_emb as input, each step process a word
                word = rnn.step_input(x_emb)
                # create prev memory parameter, batch size comes from word
                prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                # use hidden to update prev
                rnn.update_memory(prev, hidden)
                # mark hidden as output
                rnn.step_output(hidden)
            # get StaticRNN final output
            result = rnn()

    """

    BEFORE_RNN_BLOCK = 0
    IN_RNN_BLOCK = 1
    AFTER_RNN_BLOCK = 2

    def __init__(self, name=None):
        check_type(name, "name", (str, type(None)), "fluid.layers.StaticRNN")
        self.helper = LayerHelper("static_rnn", name=name)
        self.memories = {}  # memory map, from pre_mem.name --> MemoryLink
        self.inputs = []  # input variable list in current block
        self.outputs = []  # output variable list in parent block
        self.status = StaticRNN.BEFORE_RNN_BLOCK  # status flag.
        # sequence length; since this is a static RNN, the sequence length is fixed.
        self.seq_len = None

    def step(self):
        """
        Define operators in each step. step is used in a :code:`with` block, and the OPs in the :code:`with` block
        will be executed sequence_len times (sequence_len is the length of the input).
        """
        return BlockGuardWithCompletion(self)

    def _assert_in_rnn_block_(self, method):
        if self.status != StaticRNN.IN_RNN_BLOCK:
            raise ValueError("You must invoke {0} in rnn block".format(method))

    def memory(
        self,
        init=None,
        shape=None,
        batch_ref=None,
        init_value=0.0,
        init_batch_dim_idx=0,
        ref_batch_dim_idx=1,
    ):
        """
        Create a memory variable for static rnn.
        If the :code:`init` is not None, :code:`memory` will be initialized by
        this Variable. If the :code:`init` is None, :code:`shape` and :code:`batch_ref`
        must be set, and this function will create a new variable with shape and batch_ref
        to initialize :code:`init` Variable.

        Args:
            init(Variable, optional): Tensor used to init memory. If it is not set,
                :code:`shape` and :code:`batch_ref` must be provided.
                Default: None.
            shape(list|tuple): When :code:`init` is None, use this arg to initialize memory shape.
                NOTE the shape does not contain batch_size. Default: None.
            batch_ref(Variable, optional): When :code:`init` is None, memory's batch size will
                be set as batch_ref's ref_batch_dim_idx value. Default: None.
            init_value(float, optional): When :code:`init` is None, used to init memory's value. Default: 0.0.
            init_batch_dim_idx(int, optional): the batch_size axis of the :code:`init` Variable. Default: 0.
            ref_batch_dim_idx(int, optional): the batch_size axis of the :code:`batch_ref` Variable. Default: 1.

        Returns:
            Variable: The memory variable.

        Examples 1:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)
734 735 736


        Examples 2:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers
                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])
                boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1)
                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # init memory
                        prev = rnn.memory(init=boot_memory)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)

        """
        self._assert_in_rnn_block_('memory')
        check_type(
            init,
            "init",
            (Variable, type(None)),
            "fluid.layers.StaticRNN.memory",
        )
        check_type(
            shape,
            "shape",
            (list, tuple, type(None)),
            "fluid.layers.StaticRNN.memory",
        )
        check_type(
            batch_ref,
            "batch_ref",
            (Variable, type(None)),
            "fluid.layers.StaticRNN.memory",
        )
        if init is None:
            if shape is None or batch_ref is None:
                raise ValueError(
                    "if init is None, memory at least need shape and batch_ref"
                )
            parent_block = self._parent_block()
            var_name = unique_name.generate_with_ignorable_key(
                "@".join([self.helper.name, "memory_boot"])
            )
            boot_var = parent_block.create_var(
                name=var_name,
                shape=shape,
                dtype=batch_ref.dtype,
                persistable=False,
            )

            parent_block.append_op(
                type="fill_constant_batch_size_like",
                inputs={'Input': [batch_ref]},
                outputs={'Out': [boot_var]},
                attrs={
                    'value': init_value,
                    'shape': boot_var.shape,
                    'dtype': boot_var.dtype,
                    'input_dim_idx': ref_batch_dim_idx,
                    'output_dim_idx': init_batch_dim_idx,
                },
            )

            return self.memory(init=boot_var)
        else:
            pre_mem = self.helper.create_variable(
                name=unique_name.generate_with_ignorable_key(
                    "@".join([self.helper.name, "mem"])
                ),
                dtype=init.dtype,
                shape=init.shape,
            )
            self.memories[pre_mem.name] = StaticRNNMemoryLink(
                init=init, pre_mem=pre_mem
            )
            return pre_mem

    def step_input(self, x):
        """
        Mark a sequence as a StaticRNN input.

        Args:
            x(Variable): The input sequence, the shape of x
                should be [seq_len, ...].

        Returns:
            Variable: The current time step data in the input sequence.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)

        """
        self._assert_in_rnn_block_('step_input')
        check_type(x, "x", Variable, "fluid.layers.StaticRNN.step_input")
        if self.seq_len is None:
            self.seq_len = x.shape[0]
        elif x.shape[0] != -1 and self.seq_len != x.shape[0]:
            raise ValueError("Static RNN only take fix seq_len input")

        ipt = self.helper.create_variable(
            name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type
        )
        self.inputs.append(ipt)
        return ipt

    def step_output(self, o):
        """
        Mark a sequence as a StaticRNN output.

        Args:
            o(Variable): The output sequence.

        Returns:
            None.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)
                        rnn.step_output(hidden)

                result = rnn()

        """
        self._assert_in_rnn_block_('step_output')
        check_type(o, "o", Variable, "fluid.layers.StaticRNN.step_output")

        tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
        self.helper.append_op(
            type='rnn_memory_helper',
            inputs={'X': [o]},
            outputs={'Out': tmp_o},
            attrs={'dtype': o.dtype},
        )

        out_var = self._parent_block().create_var(
            name=tmp_o.name,
            shape=[self.seq_len] + list(tmp_o.shape),
            dtype=tmp_o.dtype,
        )

        self.outputs.append(out_var)

    def output(self, *outputs):
        """
        Mark the StaticRNN output variables.

        Args:
            outputs: The output Tensors; multiple variables can be marked as outputs.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers

                vocab_size, hidden_size=10000, 200
                paddle.enable_static()
                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
                # create word sequence
                x_emb = layers.embedding(
                        input=x,
                        size=[vocab_size, hidden_size],
                        dtype='float32',
                        is_sparse=False)
                # transform batch size to dim 1
                x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])

                rnn = fluid.layers.StaticRNN()
                with rnn.step():
                        # mark created x_emb as input, each step process a word
                        word = rnn.step_input(x_emb)
                        # create prev memory parameter, batch size comes from word
                        prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                        # use hidden to update prev
                        rnn.update_memory(prev, hidden)
                        # mark each step's hidden and word as output
                        rnn.output(hidden, word)

                result = rnn()
        """
        for each in outputs:
            self.step_output(each)

    def update_memory(self, mem, var):
        """
        Update the memory from :code:`mem` to :code:`var`.

        Args:
            mem(Variable): the memory variable.
            var(Variable): the plain variable generated in RNN block, used to update memory.
                           var and mem should have the same dims and data type.

        Returns:
            None

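        Examples:
            .. code-block:: python

                # a minimal sketch inside ``with rnn.step():`` (not from the
                # original docs; see the StaticRNN class examples above for
                # the full setup):
                #
                #     prev = rnn.memory(shape=[-1, hidden_size], batch_ref=word)
                #     hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                #     rnn.update_memory(prev, hidden)  # next step reads the update as ``prev``
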
        """
        check_type(mem, "mem", Variable, "fluid.layers.StaticRNN.update_memory")
        check_type(var, "var", Variable, "fluid.layers.StaticRNN.update_memory")
        self.memories[mem.name].mem = var

    def _parent_block(self):
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block

    def __call__(self, *args, **kwargs):
        if self.status != StaticRNN.AFTER_RNN_BLOCK:
            raise ValueError("RNN output can only be retrieved after rnn block")
        if len(self.outputs) == 0:
            raise ValueError("RNN has no output")
        elif len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs

    def _complete_op(self):
        main_program = self.helper.main_program
        rnn_block = main_program.current_block()
        parent_block = self._parent_block()

        local_inputs = set()

        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    local_inputs.add(out_var_name)

        for var in self.inputs:
            local_inputs.add(var.name)
        for m in self.memories:
            local_inputs.add(m)

        # NOTE(zcd): the params have two categories of variables.
        #   - the variables that are the out of StaticRnn.
        #   - the variables that are the parameters of some layers, for example, conv2d.
        params = list()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in local_inputs:
                        params.append(in_var_name)

        parameters = [
            parent_block._find_var_recursive(name) for name in set(params)
        ]

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES
        )

        inlinks = [parent_block.var(i.name) for i in self.inputs]
        outlinks = self.outputs

        # NOTE(zcd): the states maybe empty in some case.
        boot_memories = []
        pre_memories = []
        memories = []
        for _, mem in self.memories.items():
            boot_memories.append(mem.init)
            pre_memories.append(mem.pre_mem.name)
            assert (
                mem.mem is not None
            ), "%s should be updated in every step." % (mem.init.name)
            mem_var = rnn_block.var(mem.mem.name)
            assert isinstance(mem_var, Variable)
            new_mem = self.helper.create_variable_for_type_inference(
                dtype=mem_var.dtype
            )
            rnn_block.append_op(
                type='rnn_memory_helper',
                inputs={'X': [mem_var]},
                outputs={'Out': [new_mem]},
                attrs={'dtype': mem_var.dtype},
            )

            memories.append(new_mem.name)

        parent_block.append_op(
            type='recurrent',
            inputs={
                'inputs': inlinks,
                'initial_states': boot_memories,
                'parameters': parameters,
            },
            outputs={'outputs': outlinks, 'step_scopes': [step_scope]},
            attrs={
                'has_states': len(pre_memories) > 0,
                'ex_states': pre_memories,
                'states': memories,
                'sub_block': rnn_block,
            },
        )


class WhileGuard(BlockGuard):
    def __init__(self, while_op):
        if not isinstance(while_op, While):
            raise TypeError("WhileGuard takes a while op")
        super().__init__(while_op.helper.main_program)
        self.while_op = while_op

    def __enter__(self):
        self.while_op.status = While.IN_WHILE_BLOCK
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            return False
        self.while_op.status = While.AFTER_WHILE_BLOCK
        self.while_op._complete()
        return super().__exit__(exc_type, exc_val, exc_tb)


def get_inputs_outputs_in_block(
    current_block, inner_inputs, inner_outputs, helper
):
    """
    Find inputs and outputs in current control flow block.
    :param current_block: Current control flow block.
    :param inner_inputs: Input var name of ops in current block.
    :param inner_outputs: Output var name of ops in current block.
    :return: inner_inputs, inner_outputs
    """

    def is_ignore_vars(op, var_name):
        # NOTE(dev): There are some persistable var created in some non-standard API
        # such as "contrib.layers.shuffle_batch". It create a "Seed" used both in
        # Input and Output. This var shall not be considered as a loop_var in
        # control_flow.
        IGNORE_VAR_NAMES = {"shuffle_batch": ["shuffle_batch_seed"]}
        if op.type in IGNORE_VAR_NAMES:
            var_names = IGNORE_VAR_NAMES[op.type]
            for name in var_names:
                if name in var_name:
                    return True
        return False

    # Step1: update inner_inputs and inner_outputs
    # NOTE: This assumes that all variables are inputs or outputs of Ops,
    # but some variables are created without appending a real op.
    # For example, in `arr = create_array(dtype)`, `arr` is not an output of an op.
    for op in current_block.ops:
        assert isinstance(op, Operator)
        for iname in op.input_names:
            for in_var_name in op.input(iname):
                if in_var_name not in inner_outputs and not is_ignore_vars(
                    op, in_var_name
                ):
                    inner_inputs.add(in_var_name)

        for oname in op.output_names:
            for out_var_name in op.output(oname):
                inner_outputs.add(out_var_name)

    # Step2: Remove LOD_TENSOR_ARRAY created in current control flow block.
    remove_inner_inputs = set()
    parent_block = helper.main_program.block(current_block.parent_idx)

    for in_var_name in inner_inputs:
        parent_block_var = parent_block._find_var_recursive(in_var_name)
        current_block_var = None
        if current_block.has_var(in_var_name):
            current_block_var = current_block.var(in_var_name)
        if (
            not parent_block_var
            and current_block_var
            and current_block_var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            remove_inner_inputs.add(in_var_name)

    inner_inputs = inner_inputs - remove_inner_inputs

    return inner_inputs, inner_outputs


class While:
    """
    :api_attr: Static Graph

    While loop control flow. Repeats the loop body until cond is False.

    Note:
        A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` .

    Notice:
        Local variables created in ``While`` are similar to that created in while of C++, and cannot be referenced externally.
        As a result, they cannot be obtained through ``fetch_list`` of ``Executor``. If you would like to access the variable
        out of ``while`` , PaddlePaddle provides ``assign`` API to assign local variables to external. Please refer to example
        code 2 or refer to `issue#22724 <https://github.com/PaddlePaddle/Paddle/issues/22724>`_.

    Args:
        cond(Variable): A Tensor whose data type is bool controlling whether to continue looping.
        is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

    Examples 1:
          .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)           # loop counter

            loop_len = fluid.layers.fill_constant(shape=[1],dtype='int64', value=10)    # loop length

            cond = paddle.less_than(x=i, y=loop_len)
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                i = fluid.layers.increment(x=i, value=1, in_place=True)
                paddle.assign(paddle.less_than(x=i, y=loop_len), cond)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[i])
            print(res) # [array([10])]


    Examples 2:
          .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1)
            data = fluid.data(name='data', shape=[1], dtype='float32')
            sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0)  # Define the variable to be obtained outside of While, whose name should be different from the variable inside the While to be obtained

            cond = paddle.less_than(x=i, y=loop_len)
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                sums_tensor = fluid.layers.elementwise_add(x=data, y=data)
                fluid.layers.assign(sums_tensor, sums)  # Update the value of sums_tensor defined in While to sums, which is defined outside of While, through layers.assign
                i = fluid.layers.increment(x=i, value=1, in_place=True)
                data = fluid.layers.elementwise_add(x=data, y=one)
                paddle.assign(paddle.less_than(x=i, y=loop_len), cond)

            feed_data = np.ones(1).astype('float32')
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res = exe.run(fluid.default_main_program(), feed={'data': feed_data}, fetch_list=sums)
            print(res[0])  # [2.]    # Because the data in While does not update the value outside the While, the value of sums is [2.] after the loop
    """

    BEFORE_WHILE_BLOCK = 0
    IN_WHILE_BLOCK = 1
    AFTER_WHILE_BLOCK = 2

    def __init__(self, cond, is_test=False, name=None):
        self.helper = LayerHelper("while", name=name)
        self.status = While.BEFORE_WHILE_BLOCK
        check_variable_and_dtype(cond, 'cond', ['bool'], 'fluid.layers.While')
        if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
            raise TypeError(
                "condition expected shape as [1], but given shape as {0}.".format(
                    list(cond.shape)
                )
            )
        self.cond_var = cond
        self.is_test = is_test

    def block(self):
        return WhileGuard(self)

    def _complete(self):
        main_program = self.helper.main_program
        while_block = main_program.current_block()
        parent_block = main_program.block(
            main_program.current_block().parent_idx
        )

        inner_outputs = {self.cond_var.name}
        x_name_list = set()
        x_name_list, inner_outputs = get_inputs_outputs_in_block(
            while_block, x_name_list, inner_outputs, self.helper
        )

        out_vars = []
        for inner_out_name in inner_outputs:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_vars.append(inner_var)

        x_name_list |= set(map(lambda x: x.name, out_vars))
        # NOTE(dev): cond_var has been contained in Input('Condition'), so
        # we remove it from Input('X')
        x_name_list -= {self.cond_var.name}

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES
        )

        parent_block.append_op(
            type='while',
            inputs={
                'X': [
                    parent_block._var_recursive(x_name)
                    for x_name in x_name_list
                ],
                'Condition': [self.cond_var],
            },
            outputs={'Out': out_vars, 'StepScopes': [step_scope]},
            attrs={'sub_block': while_block, "is_test": self.is_test},
        )


support_ret_buildin_type = (bool, float, int)


def assign_skip_lod_tensor_array(input, output):
    """
    Assign input to output, but skip the process of copying LoDTensorArray unless it's created in while_block.
    """

    def has_shape_diff(x_var, y_var):
        if len(x_var.shape) != len(y_var.shape):
            return True
        for x_dim, y_dim in zip(x_var.shape, y_var.shape):
            if x_dim != y_dim and -1 not in [x_dim, y_dim]:
                return True
        return False
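
    # Illustrative examples (hypothetical shapes): [2, 3] vs [2, -1] -> False
    # (-1 is compatible with any dim), [2, 3] vs [2, 4] -> True, and
    # [2, 3] vs [2, 3, 1] -> True (different ranks).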

    if not isinstance(input, (Variable, core.VarBase)):
        if isinstance(output, Variable) and isinstance(
1338 1339
            input, support_ret_buildin_type
        ):
1340 1341 1342
            assign(input, output)
        else:
            output = input
1343 1344
        return

1345 1346
    if input.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        main_program = input.block.program
1347
        parent_block = main_program.block(
1348 1349
            main_program.current_block().parent_idx
        )
1350 1351 1352
        if parent_block and not parent_block._find_var_recursive(input.name):
            assign(input, output)
    else:
1353 1354 1355 1356 1357
        if (
            isinstance(output, Variable)
            and isinstance(input, Variable)
            and has_shape_diff(input, output)
        ):
1358
            warnings.warn(
1359 1360 1361 1362
                "In dy2static mode, we attemp to assign a variable with shape {} into a variable with shape{}, which is not always right.".format(
                    input.shape, output.shape
                )
            )
1363
        assign(input, output)
1364 1365


def while_loop(cond, body, loop_vars, is_test=False, name=None):
    """
    :api_attr: Static Graph

    while_loop is one of the control flow ops. It repeatedly executes ``body`` until ``cond`` returns False.

    Notice:
        Local variables defined in ``body`` cannot be obtained through ``fetch_list`` of ``Executor`` ; variables should
        be defined outside ``body`` and placed in ``loop_vars`` for looping, then these variables can be fetched by ``fetch_list`` .

    Args:
        cond(Callable): A callable returning a boolean tensor controlling whether to continue looping. ``cond`` takes
            as many arguments as ``loop_vars`` .
        body(Callable): A callable returning a tuple or list of tensors or LoDTensorArrays of the same arity
            (length and structure) and types as ``loop_vars`` . ``body`` takes as many arguments as ``loop_vars`` .
        loop_vars(list|tuple): A list or tuple of tensors or LoDTensorArrays that is passed to both ``cond`` and ``body`` .
        is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
        name(str, optional): Normally there is no need for users to set this property. For more information, please
            refer to :ref:`api_guide_Name`. Default is None.

    Returns:
        A list or tuple of Tensors or LoDTensorArrays returned by ``body`` .

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()

            def cond(i, ten):
                return i < ten

            def body(i, ten):
                i = i + 1
                return [i, ten]

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()
            with paddle.static.program_guard(main_program, startup_program):
                i = paddle.full(shape=[1], fill_value=0, dtype='int64')     # loop counter
                ten = paddle.full(shape=[1], fill_value=10, dtype='int64')  # loop length
                i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])

                exe = paddle.static.Executor(paddle.CPUPlace())
                res = exe.run(main_program, feed={}, fetch_list=[i])
                print(res) # [array([10])]
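
            # The same loop can also be run eagerly; a minimal sketch, assuming
            # dynamic graph mode is enabled via paddle.disable_static():
            #
            #     i = paddle.full(shape=[1], fill_value=0, dtype='int64')
            #     ten = paddle.full(shape=[1], fill_value=10, dtype='int64')
            #     i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
            #     print(i.numpy())  # [10]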
    """
    helper = LayerHelper('while_loop', **locals())

    if not callable(cond):
        raise TypeError("cond in while_loop should be callable")
    if not callable(body):
        raise TypeError("body in while_loop should be callable")
    check_type(loop_vars, 'loop_vars', (list, tuple), 'fluid.layers.while_loop')
    if len(loop_vars) == 0:
        raise ValueError("loop_vars in while_loop should not be empty")

    pre_cond = cond(*loop_vars)
    check_variable_and_dtype(
        pre_cond, 'var of cond returned', ['bool'], 'fluid.layers.while_loop'
    )
    if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1:
        raise TypeError(
            "the shape of the variable returned by cond should be [1], "
            "but given shape as {0}.".format(list(pre_cond.shape))
        )

    if _non_static_mode():
        now_cond = pre_cond.numpy()[0]
        while now_cond:
            output_vars = body(*loop_vars)
            if not isinstance(output_vars, (list, tuple)):
                output_vars = [output_vars]
            if len(output_vars) != len(loop_vars):
                raise ValueError(
                    "body in while_loop should return the same arity "
                    "(length and structure) and types as loop_vars"
                )
            now_cond = cond(*output_vars).numpy()[0]
            map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
        return loop_vars

    while_loop_block = While(pre_cond, is_test, name)
    has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
    with while_loop_block.block():
        # If a variable with mutable type is included in loop_vars, like `dict/list`,
        # modifying it in the body function will cause the original variable to be
        # modified synchronously. This will raise an assignment error outside the
        # while block. Here we make a copy of the mutable vars to avoid this problem.
        if has_mutable_vars_in_loop:
            new_loop_vars = copy_mutable_vars(loop_vars)
            output_vars = body(*new_loop_vars)
        else:
            output_vars = body(*loop_vars)
        if not isinstance(output_vars, (list, tuple)):
            output_vars = [output_vars]
        try:
            loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
            assert_same_structure(output_vars, loop_vars, check_types=False)
        except ValueError as e:
            raise ValueError(
                "body in while_loop should return the same arity "
                "(length and structure) as loop_vars: {0}".format(e)
            )
        now_cond = cond(*output_vars)
        map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
        assign(now_cond, pre_cond)
    return loop_vars


def _deal_with_undefined_var(output_vars, loop_vars):
    """Deal with undefined var cases: we create undefined variables based on the results of body().
    In Dy2Static, we use undefined vars to represent variables created inside control flow. This function
    expands loop_vars and replaces the original loop_vars.
    1. UndefinedVar = Variable      # create a variable
    2. UndefinedVar = None          # create an undefined var with RETURN_NO_VALUE_MAGIC_NUM
    3. UndefinedVar = List(int)     # create a list of variables
    4. UndefinedVar = value         # create a variable
    """
    from paddle.jit.dy2static.utils import (
        UndefinedVar,
        create_undefined_variable,
    )

    def create_var_like(o_var):
        if (
            isinstance(o_var, (Variable,) + support_ret_buildin_type)
            or o_var is None
        ):
            return create_undefined_variable()
        if is_sequence(o_var):
            """
            Create a complex container class inside the body of while, including Python lists and Python dicts.
            """
            return map_structure(lambda x: create_undefined_variable(), o_var)

    if len(output_vars) != len(loop_vars):
        raise ValueError("output_vars and loop_vars should have the same length.")

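    # Illustrative example (not from the original source): if body() returned
    # (Var1, [1, 2]) while loop_vars held (UndefinedVar, None), both slots are
    # replaced, and the second slot becomes a list of fresh undefined
    # variables matching the structure produced by body().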
    results = []
    for o_var, l_var in zip(output_vars, loop_vars):
        if isinstance(l_var, UndefinedVar) or l_var is None:
            results.append(create_var_like(o_var))
        else:
            results.append(l_var)
    return results


def lod_rank_table(x, level=0):
    """
    LoD Rank Table Operator. Given an input variable **x** and a level number
    of LoD, this layer creates a LoDRankTable object. A LoDRankTable object
    contains a list of bi-element tuples. Each tuple consists of an index and
    a length, both of which are int type. Referring to the specified level of LoD,
    the index is the sequence index number and the length represents the
    sequence length. Please note that the list is ranked in descending order by
    the length. The following is an example:

        .. code-block:: text

            x is a LoDTensor:
                x.lod = [[2,                1],
                         [5,             1, 1]]
                x.data = [a, b, c, d, e, f, g]

            1. set level to 0:
                Create lod rank table:
                    lod_rank_table_obj = lod_rank_table(x, level=0)

                Get:
                    lod_rank_table_obj.items() = [(0, 2), (1, 1)]

            2. set level to 1:
                Create lod rank table:
                    lod_rank_table_obj = lod_rank_table(x, level=1)

                Get:
                    lod_rank_table_obj.items() = [(0, 5), (1, 1), (2, 1)]

    Args:
        x (Variable): Input variable, a LoDTensor based which to create the lod
            rank table.
        level (int): Specify the LoD level, on which to create the lod rank
            table.

    Returns:
        Variable: The created LoDRankTable object.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[10],
                                  dtype='float32', lod_level=1)
            out = fluid.layers.lod_rank_table(x=x, level=0)
    """
    check_type(x, 'x', (Variable, list), 'lod_rank_table')
    if isinstance(x, (list)):
        for i, input_x in enumerate(x):
            check_type(
                input_x, 'input[' + str(i) + ']', Variable, 'lod_rank_table'
            )

    helper = LayerHelper("lod_rank_table", **locals())
    table = helper.create_variable(
        type=core.VarDesc.VarType.LOD_RANK_TABLE,
        name=unique_name.generate("lod_rank_table"),
    )
    helper.append_op(
        type='lod_rank_table',
        inputs={'X': x},
        outputs={'Out': table},
        attrs={'level': level},
    )
    return table


@templatedoc()
def max_sequence_len(rank_table):
    """
    ${comment}

    >>> import paddle.fluid as fluid
    >>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
    >>>                       lod_level=1)
    >>> rank_table = fluid.layers.lod_rank_table(x=x, level=0)
    >>> max_seq_len = fluid.layers.max_sequence_len(rank_table)

    Args:
        rank_table(${rank_table_type}): ${rank_table_comment}.

    Returns:
        ${out_comment}.
    """
    helper = LayerHelper("max_sequence_len", **locals())
    res = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="max_sequence_len",
        inputs={"RankTable": rank_table},
        outputs={"Out": res},
    )
    return res


def increment(x, value=1.0, in_place=True):
    """
    This OP is usually used in control flow to increment the data of :attr:`x` by the amount :attr:`value`.
    Notice that the number of elements in :attr:`x` must be equal to 1.

    Parameters:
        x (Variable): A tensor that must always contain only one element, whose data type can be
            float32, float64, int32 or int64.
        value (float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
        in_place (bool, optional): Whether the OP should be performed in-place. Default: True.

    Returns:
        Variable: The elementwise-incremented tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.]
          fluid.layers.increment(counter) # [1.]
    """
    if in_dygraph_mode():
        return _C_ops.increment_(x, value)

    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
    )
    helper = LayerHelper("increment", **locals())
    if not in_place:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = x
    helper.append_op(
        type='increment',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'step': float(value)},
    )
    return out


def array_write(x, i, array=None):
    """
    This OP writes the input ``x`` into the i-th position of the ``array``
    :ref:`api_fluid_LoDTensorArray` and returns the modified array.
    If ``array`` is None, a new LoDTensorArray will be created and returned.
    This OP is often used together with :ref:`api_fluid_layers_array_read` OP.

    Args:
        x (Variable): The input data to be written into array. It's multi-dimensional
            Tensor or LoDTensor. Data type: float32, float64, int32, int64.
        i (Variable): 1-D Tensor with shape [1], which represents the position into which
            ``x`` is written. Data type: int64.
        array (LoDTensorArray, optional): The LoDTensorArray into which ``x`` is written.
            The default value is None, in which case a new LoDTensorArray will be created
            and returned as a result.

    Returns:
        Variable: The input ``array`` after ``x`` is written into.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # Write tmp into the position of arr with subscript 10 and return arr.
            arr = fluid.layers.array_write(tmp, i=i)

            # Now, arr is a LoDTensorArray with length 11. We can use array_read OP to read
            # the data at subscript 10 and print it out.
            item = fluid.layers.array_read(arr, i=i)
            input = fluid.layers.Print(item, message="The content of i-th LoDTensor:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)

            # The printed result is:
            # 1570533133    The content of i-th LoDTensor:  The place is:CPUPlace
            # Tensor[array_read_0.tmp_0]
            #    shape: [3,2,]
            #    dtype: l
            #    data: 5,5,5,5,5,5,

            # the output is 2-D Tensor with shape [3,2], which is tmp above.
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They all represent 64-bit integer variables.

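            # A dygraph sketch (illustrative, assuming dynamic graph mode): in
            # that mode the "array" is a plain Python list, so array_write
            # simply appends to it or overwrites an existing entry in place.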
    """
    if _non_static_mode():
        assert isinstance(
            x, Variable
        ), "The input data 'x' in array_write must be Variable in dygraph mode"
        assert isinstance(
            i, Variable
        ), "The index 'i' in array_write must be Variable in dygraph mode"
        assert i.shape == [
            1
        ], "The shape of index 'i' should be [1] in dygraph mode"
        i = i.numpy().item(0)
        if array is None:
            array = paddle.tensor.create_array(x.dtype)
        assert isinstance(
            array, list
        ), "The 'array' in array_write must be a list in dygraph mode"
        assert i <= len(
            array
        ), "The index 'i' should not be greater than the length of 'array' in dygraph mode"
        if i < len(array):
            array[i] = x
        else:
            array.append(x)
        return array

    check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
    check_type(x, 'x', (Variable), 'array_write')
    helper = LayerHelper('array_write', **locals())
    if array is not None:
        if (
            not isinstance(array, Variable)
            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            raise TypeError(
                "array should be a tensor array variable in array_write Op"
            )
    if array is None:
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype,
        )
    helper.append_op(
        type='write_to_array',
        inputs={'X': [x], 'I': [i]},
        outputs={'Out': [array]},
    )
    return array


def array_read(array, i):
    """
    This OP is used to read data at the specified position from the input array
    :ref:`api_fluid_LoDTensorArray` . ``array`` is the input array and ``i``
    is the specified read position. This OP is often used together with
    :ref:`api_fluid_layers_array_write` OP.

    Case 1:
    ::
        Input:
            The shape of first three tensors are [1], and that of the last one is [1,2]:
                array = ([0.6], [0.1], [0.3], [0.4, 0.2])
            And:
                i = [3]

        Output:
            output = [0.4, 0.2]

    Args:
        array (LoDTensorArray): The input LoDTensorArray.
        i (Variable): 1-D Tensor, whose shape is [1] and dtype is int64. It represents the
            specified read position of ``array``.

    Returns:
        Variable: The LoDTensor or Tensor that is read at the specified position of ``array``.

    Examples:
        .. code-block:: python

            # First we're going to create a LoDTensorArray, then we're going to write the Tensor into
            # the specified position, and finally we're going to read the Tensor at that position.
            import paddle.fluid as fluid
            arr = fluid.layers.create_array(dtype='float32')
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # tmp is the Tensor with shape [3,2], and if we write it into the position with subscript 10
            # of the empty-array: arr, then the length of arr becomes 11.
            arr = fluid.layers.array_write(tmp, i, array=arr)
            # Read the data of the position with subscript 10.
            item = fluid.layers.array_read(arr, i)

            # You can print out the data via executor.
            input = fluid.layers.Print(item, message="The LoDTensor of the i-th position:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)

            # The printed result is:

            # 1569588169  The LoDTensor of the i-th position: The place is:CPUPlace
            # Tensor[array_read_0.tmp_0]
            #    shape: [3,2,]
            #    dtype: l
            #    data: 5,5,5,5,5,5,

            # the output is 2-D Tensor with shape [3,2].
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They all represent 64-bit integer variables.
    """
    if _non_static_mode():
        assert isinstance(
            array, list
        ), "The 'array' in array_read must be list in dygraph mode"
        assert isinstance(
            i, Variable
        ), "The index 'i' in array_read must be Variable in dygraph mode"
        assert i.shape == [
            1
        ], "The shape of index 'i' should be [1] in dygraph mode"
        i = i.numpy().item(0)
        return array[i]

    check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
    helper = LayerHelper('array_read', **locals())
    if (
        not isinstance(array, Variable)
        or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
    ):
        raise TypeError("array should be a tensor array variable")
    out = helper.create_variable_for_type_inference(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array], 'I': [i]},
        outputs={'Out': [out]},
    )
    return out


class ConditionalBlockGuard(BlockGuard):
    """
    ConditionalBlockGuard is derived from BlockGuard. It is dedicated to
    holding a ConditionalBlock and helping users enter and exit the
    ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
    is generally an internal component of IfElse; users should not use it directly.
    """

    def __init__(self, block):
        check_type(block, "block", ConditionalBlock, "ConditionalBlockGuard")
        super().__init__(block.helper.main_program)
        self.block = block

    def __enter__(self):
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.block.complete()
        return super().__exit__(exc_type, exc_val, exc_tb)


class ConditionalBlock:
    '''
    **ConditionalBlock**

    ConditionalBlock is an operator that bind a block to a specific condition,
    if the condition matches, the corresponding block will be executed.

    Args:
        inputs (Variable): bool conditions.
        is_scalar_condition (bool): whether the branch is controlled by a scalar.
        name(str): name of this ConditionalBlock.

    Examples:
        .. code-block:: python

             import paddle
             import paddle.fluid as fluid
             cond = paddle.less_than(x=label, y=limit)
             true_image, false_image = fluid.layers.split_lod_tensor(
                 input=image, mask=cond)
             true_cond = ConditionalBlock([true_image])
             false_cond = ConditionalBlock([false_image])

             with true_cond.block():
                 ...
             with false_cond.block():
                 ...
    '''

    def __init__(self, inputs, is_scalar_condition=False, name=None):
        for each_input in inputs:
            check_type(each_input, "input", Variable, "ConditionalBlock")
        self.inputs = inputs
        self.is_scalar_condition = is_scalar_condition
        self.helper = LayerHelper('conditional_block', name=name)

    def block(self):
        return ConditionalBlockGuard(self)

    def complete(self):
        inside_block = self.helper.main_program.current_block()
        parent_block = self.helper.main_program.block(inside_block.parent_idx)

        intermediate = set()
        params = set()
        params, intermediate = get_inputs_outputs_in_block(
            inside_block, params, intermediate, helper=self.helper
        )

        # TODO(liym27): Here we assume that all params are in the recursive parent block,
        # but when minimize() is called in control flow, some params may be in the
        # conditional grad block.
        param_list = [
            parent_block._var_recursive(each_name) for each_name in params
        ]

        out_list = []
        for inner_out_name in intermediate:
            inner_var = parent_block._find_var_recursive(inner_out_name)
            if inner_var:
                out_list.append(inner_var)

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES
        )
        conditional_block_op = parent_block.append_op(
            type='conditional_block',
            inputs={
                'Cond': self.inputs,
                'Input': param_list,
            },
            outputs={'Out': out_list, 'Scope': [step_scope]},
            attrs={
                'sub_block': inside_block,
                'is_scalar_condition': self.is_scalar_condition,
            },
        )

        if self.need_append_conditional_block_grad(inside_block):
            self.append_conditional_block_grad(
                parent_block, inside_block, conditional_block_op
            )

    def need_append_conditional_block_grad(self, inside_block):
        grad_sub_block_idx = inside_block.backward_block_idx
        inside_block_idx = inside_block.idx

        # if inside_block have grad_block and grad_block is not itself,
        # we will append conditional block grad.
        return (
            grad_sub_block_idx != -1 and grad_sub_block_idx != inside_block_idx
        )

    def append_conditional_block_grad(
        self, parent_block, inside_block, conditional_block_op
    ):
        '''
        Append op `conditional_block_grad` manually.
        When `optimizer.minimize/append_backward` is called in Paddle control flow,
        grad ops will be appended before appending op `conditional_block` so that
        op `conditional_block_grad` can't be appended when calling
        `optimizer.minimize/append_backward`. After appending op `conditional_block`,
        `conditional_block_grad` is appended manually.

        Args:
            parent_block (Block): The block that `conditional_block_op` belongs to.
            inside_block (Block): The sub block of `conditional_block_op`.
            conditional_block_op (Operator): The forward op conditional_block.
        '''

        grad_sub_block_idx = inside_block.backward_block_idx
        grad_sub_block = self.helper.main_program.block(grad_sub_block_idx)

        intermediate = set()
        params = set()

        for each_op in grad_sub_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)

            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)

        param_list = []
        for inner_input_name in params:
            inner_var = parent_block._find_var_recursive(inner_input_name)
            if inner_var:
1992
                param_list.append(inner_var.name)

        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
1995 1996
            conditional_block_op.desc, set(), [grad_sub_block.desc]
        )
1997 1998 1999 2000 2001 2002 2003 2004 2005

        # append op_desc in grad_op_descs to target_block
        op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        new_op_desc = parent_block.desc.append_op()
        new_op_desc.copy_from(grad_op_desc[0])
        new_op_desc._set_attr(op_role_attr_name, backward)
        # set input and output manually
        new_op_desc.set_input('Input', param_list)
2006 2007 2008
        new_op_desc.set_output(
            'Input@GRAD', [param + "@GRAD" for param in param_list]
        )
2009 2010 2011

        new_vars = set()
        for grad_var_name in new_op_desc.output_arg_names():
2012 2013 2014 2015
            if (
                grad_sub_block.desc.has_var_recursive(grad_var_name.encode())
                or grad_var_name == core.empty_var_name()
            ):
2016
                continue
2017
            grad_sub_block.desc.var(grad_var_name.encode())
2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031
            new_vars.add(grad_var_name)
            if grad_var_name not in op_grad_to_var:
                continue

        # infer_shape and infer_type
        new_op_desc.infer_var_type(grad_sub_block.desc)
        new_op_desc.infer_shape(grad_sub_block.desc)

        for arg in new_op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_shape_(arg, grad_sub_block)

        self.helper.main_program._sync_with_cpp()


def copy_var_to_parent_block(var, layer_helper):
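    # Make ``var`` visible in the enclosing block: a LoDTensorArray already
    # known to the parent block is reused as-is; anything else is copied into
    # a fresh parent-block variable via assign().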
    if not isinstance(var, Variable):
        return var
    prog = layer_helper.main_program
    parent_idx = prog.current_block().parent_idx
    assert (
        parent_idx >= 0
    ), "Got wrong parent block index when assigning var to parent scope in control_flow"
    parent_block = prog.block(parent_idx)

    if (
        var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        and parent_block._find_var_recursive(var.name)
    ):
        parent_block_var = var
    else:
        parent_block_var = parent_block.create_var(
            dtype=var.dtype, shape=var.shape, type=var.type
        )
        assign(var, parent_block_var)
    return parent_block_var


def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
    """
    This API returns ``true_fn()`` if the predicate ``pred`` is true else
    ``false_fn()`` . Users could also set ``true_fn`` or ``false_fn`` to
    ``None`` ; in that case this API treats the callable as if it simply
    returns ``None`` .

    ``true_fn`` and ``false_fn`` should return the same nest structure of tensors,
    or both return ``None`` if the user doesn't want to return anything. A nest
    structure of tensors in PaddlePaddle is tensor(s), or tuple of tensors, or
    list of tensors.

    Note:
        1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must have
        the same structure because of the dataflow model of PaddlePaddle, while the
        tensors in the tuples or the lists can have different shapes.

        2. This API could be used under both static mode or dygraph mode. If it
        is in dygraph mode, the API only runs one branch based on condition.

        3. If it is in static mode, any tensors or operations created outside
        or inside of ``true_fn`` and ``false_fn`` will be in net building
        regardless of which branch is selected at runtime. This has frequently
        surprised users who expected lazy semantics. For example:

        .. code-block:: python

            import paddle

            a = paddle.zeros((1, 1))
            b = paddle.zeros((1, 1))
            c = a * b
            out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)

        No matter whether ``a < b`` , ``c = a * b`` will be in net building and
        run. ``a + c`` and ``b * b`` will be in net building, but only one
        branch will be executed during runtime.

    Args:
        pred(Tensor): A boolean tensor whose numel should be 1. The boolean
            value determines whether to return the result of ``true_fn`` or
            ``false_fn`` .
        true_fn(callable, optional): A callable to be performed if ``pred`` is
            true. The default value is ``None`` .
        false_fn(callable, optional): A callable to be performed if ``pred`` is
            false. The default value is ``None`` .
        name(str, optional): The default value is ``None`` . Normally users
             don't have to set this parameter. For more information, please
             refer to :ref:`api_guide_Name` .
        return_names(sequence of string, optional): The default value is ``None`` .
             Normally users don't have to set this parameter.  A sequence of strings
             that represents the names of the returned vars.  The structure of the sequence must
             be the same as the return values of true_fn and false_fn.

    Returns:
        Tensor|list(Tensor)|tuple(Tensor): returns ``true_fn()`` if the
        predicate ``pred`` is true else ``false_fn()`` .

    Raises:
        TypeError: if ``true_fn`` or ``false_fn`` is not callable.
        ValueError: if ``true_fn`` and ``false_fn`` don't return the same nest
            structure of tensors.

    Examples:
        .. code-block:: python

            import paddle

            #
            # pseudocode:
            # if 0.1 < 0.23:
            #     return 1, True
            # else:
            #     return 3, 2
            #

            def true_func():
                return paddle.full(shape=[1, 2], dtype='int32',
                                   fill_value=1), paddle.full(shape=[2, 3],
                                                              dtype='bool',
                                                              fill_value=True)


            def false_func():
                return paddle.full(shape=[3, 4], dtype='float32',
                                   fill_value=3), paddle.full(shape=[4, 5],
                                                              dtype='int64',
                                                              fill_value=2)


            x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
            y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
            pred = paddle.less_than(x=x, y=y, name=None)
            ret = paddle.static.nn.cond(pred, true_func, false_func)
            # ret is a tuple containing 2 tensors
            # ret[0] = [[1 1]]
            # ret[1] = [[ True  True  True]
            #           [ True  True  True]]

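            # A dygraph sketch (illustrative assumption, not part of the
            # original example): in dynamic graph mode only the chosen branch
            # actually runs, e.g.
            #
            #     paddle.disable_static()
            #     a = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
            #     b = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
            #     c = paddle.static.nn.cond(a < b, lambda: a + 1, lambda: b - 1)
            #     # only the true branch (a + 1) is evaluated here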
    """
    if _non_static_mode():
        assert isinstance(pred, Variable), "The pred in cond must be Variable"
        assert pred.size == 1, "condition input's numel should be 1"
        pred = pred.numpy()[0]
        if pred:
            if true_fn is not None:
                if not callable(true_fn):
                    raise TypeError(
                        "The true_fn in cond must be callable, but received {}".format(
                            type(true_fn).__name__
                        )
                    )
                return true_fn()
        else:
            if false_fn is not None:
                if not callable(false_fn):
                    raise TypeError(
                        "The false_fn in cond must be callable, but received {}".format(
                            type(false_fn).__name__
                        )
                    )
                return false_fn()
        return None

    check_variable_and_dtype(pred, "pred", ['bool'], "fluid.layers.cond")
    check_type(name, "name", (str, type(None)), "fluid.layers.cond")
    helper = LayerHelper('cond', **locals())
    true_output = None
    false_output = None
    copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
    if true_fn is not None:
        if not callable(true_fn):
            raise TypeError(
                "The true_fn in cond must be callable, but received {}".format(
2190 2191 2192
                    type(true_fn).__name__
                )
            )
2193 2194 2195 2196
        true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
        with true_cond_block.block():
            origin_true_output = true_fn()
            if origin_true_output is not None:
2197 2198 2199
                true_output = map_structure(
                    copy_to_parent_func, origin_true_output
                )
2200 2201
    if false_fn is not None:
        if not callable(false_fn):
2202 2203
            raise TypeError(
                "The false_fn in cond must be callable, but received {}".format(
2204 2205 2206 2207
                    type(false_fn).__name__
                )
            )
        false_cond_block = ConditionalBlock(
2
2209
        )
2210 2211 2212
        with false_cond_block.block():
            origin_false_output = false_fn()
            if origin_false_output is not None:
2213 2214 2215
                false_output = map_structure(
                    copy_to_parent_func, origin_false_output
                )
2216 2217 2218 2219 2220 2221 2222

    if true_output is None and false_output is None:
        return None

    if true_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns None while false_fn returns non-None"
        )
    if false_output is None:
        raise ValueError(
            "Incompatible return values of true_fn and false_fn in cond: "
            "true_fn returns non-None while false_fn returns None"
        )

    # Merge true and false output if they are not None
    if return_names is None:
        is_dy2static = False
        return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
    else:
        """
        dy2static will set the return_names and expand the return values to UndefinedVar.
        """
        is_dy2static = True

        # TODO:  expand_undefined_var will replace None to Undefinedvar(), to fix cases like:
        #       a = None
        #       if condition:
        #           a = 1
        # Because we can not use variable to express 'None'
        true_output, false_output = expand_undefined_var(
            true_output, false_output, return_names
        )

    if len(_to_sequence_except_dict(true_output)) != len(
        _to_sequence_except_dict(false_output)
    ):
        raise ValueError(
            "true fn returns {} vars, but false fn returns {} vars, which is not equals".format(
                len(_to_sequence_except_dict(true_output)),
                len(_to_sequence_except_dict(false_output)),
            )
        )
    for true_out, false_out, return_name in zip(
        _to_sequence_except_dict(true_output),
        _to_sequence_except_dict(false_output),
        _to_sequence_except_dict(return_names),
    ):
        try:
            assert_same_structure(true_out, false_out, check_types=False)
        except ValueError as e:
            raise ValueError(
                "Incompatible return values of `{}` in true_fn and false_fn in cond: {}".format(
                    return_name, e
                )
            )

    def check_ret_none(seq_true, seq_false, seq_names):
        for f_true, f_false, f_name in zip(seq_true, seq_false, seq_names):
            f_true = flatten(f_true)
            f_false = flatten(f_false)
            for idx in range(len(f_true)):
                if (
                    f_true[idx] is None
                    and f_false[idx] is not None
                    or f_false[idx] is None
                    and f_true[idx] is not None
                ):
                    warnings.warn(
                        "In cond : Var '{}' or part of it is set differently in ifelse branches, "
                        "<{}, {}> in true branch and <{}, {}> in false branch. Setting var to "
                        "'None' in ifelse block might lead to error.".format(
                            f_name,
                            type(f_true[idx]),
                            f_true[idx],
                            type(f_false[idx]),
                            f_false[idx],
                        )
                    )

    check_ret_none(
        _to_sequence_except_dict(true_output),
        _to_sequence_except_dict(false_output),
        _to_sequence_except_dict(return_names),
    )

    if is_dy2static:
        true_output, false_output = change_none_to_undefinedvar(
            true_output, false_output
        )

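    # Encode the branch choice as an int32 mask (0 selects the false output,
    # 1 the true output) and merge each pair of branch outputs through
    # select_input_with_buildin_type.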
    mask = cast(pred, dtype='int32')
    merge_func = (
        lambda name, false_var, true_var: select_input_with_buildin_type(
            [false_var, true_var], mask, name
        )
    )

    def merge_every_var_list(false_vars, true_vars, name):
        return map_structure(partial(merge_func, name), false_vars, true_vars)

    merged_output = list(
        map(
            merge_every_var_list,
            _to_sequence_except_dict(false_output),
            _to_sequence_except_dict(true_output),
            _to_sequence_except_dict(return_names),
        )
    )
    merged_output = pack_sequence_as(false_output, flatten(merged_output))
    return merged_output


def change_none_to_undefinedvar(nest1, nest2):
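    # dy2static helper: None cannot flow through the static graph, so it is
    # replaced with an UndefinedVar placeholder in both nests.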
    from paddle.jit.dy2static.utils import UndefinedVar

    def map_fn(x):
        if x is None:
            return UndefinedVar("padding")
        return x

    nest1_out = pack_sequence_as(nest1, list(map(map_fn, flatten(nest1))))
    nest2_out = pack_sequence_as(nest2, list(map(map_fn, flatten(nest2))))
    return nest1_out, nest2_out


def _to_sequence_except_dict(x):
    """
    In this function, dict is not viewed as sequence.
    """
    if isinstance(x, dict):
        return [x]
    return to_sequence(x)


def _is_sequence_except_dict(x):
    """
    In this function, dict is not viewed as sequence.
    """
    if isinstance(x, dict):
        return False
    return is_sequence(x)


def expand_undefined_var(nest1, nest2, names):
    """TODO: make this function recursively.
    nest1: Var1, (UndefinedVar, [1,2,3])
    nest2: Var2, ([1,2,3,4], UndefinedVar)
    In this case, we should not expand recursively.
    """
    from paddle.jit.dy2static.utils import UndefinedVar
    from paddle.jit.dy2static.return_transformer import (
        RETURN_VALUE_PREFIX,
    )

    def pack_undefined_var_as(seq):
        return pack_sequence_as(
            seq, [UndefinedVar("padding") for i in flatten(seq)]
        )

    def map_fn(n1, n2, name, order):
        if not name.startswith(RETURN_VALUE_PREFIX) and (
            isinstance(n1, UndefinedVar) or n1 is None
        ):
            if n1 is None and n2 is not None:
                if order == 0:
                    warnings.warn(
                        "In cond : Var '{}' or part of it is set differently in ifelse branchs, "
                        "<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
                        "'None' in ifelse block might lead to error.".format(
                            name, type(n1), n1, type(n2), n2
                        )
                    )
                else:
                    warnings.warn(
                        "In cond : Var '{}' or part of it is set differently in ifelse branchs, "
                        "<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
                        "'None' in ifelse block might lead to error.".format(
                            name, type(n2), n2, type(n1), n1
                        )
                    )
            return pack_undefined_var_as(n2)
        return n1

    nest1_out = list(
        map(
            map_fn,
            _to_sequence_except_dict(nest1),
            _to_sequence_except_dict(nest2),
            _to_sequence_except_dict(names),
            [0 for i in _to_sequence_except_dict(names)],
        )
    )
    nest2_out = list(
        map(
            map_fn,
            _to_sequence_except_dict(nest2),
            _to_sequence_except_dict(nest1),
            _to_sequence_except_dict(names),
            [1 for i in _to_sequence_except_dict(names)],
        )
    )
    if not _is_sequence_except_dict(nest1):
        nest1_out = nest1_out[0]
    if not _is_sequence_except_dict(nest2):
        nest2_out = nest2_out[0]
    return nest1_out, nest2_out


def _error_message(what, arg_name, op_name, right_value, error_value):
    error_message = (
        "{what} of '{arg_name}' in {op_name} must be "
        "{right_value}, but received: {error_value}.".format(
            what=what,
            arg_name=arg_name,
            op_name=op_name,
            right_value=right_value,
            error_value=error_value,
        )
    )

    return error_message


def case(pred_fn_pairs, default=None, name=None):
    '''
    :api_attr: Static Graph

    This operator works like an if-elif-elif-else chain.

    Args:
        pred_fn_pairs(list|tuple): A list or tuple of (pred, fn) pairs. ``pred`` is a boolean Tensor with shape [1], ``fn`` is a callable. All callables return the same structure of Tensors.
        default(callable, optional): Callable that returns a structure of Tensors.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor|list(Tensor): Tensors returned by the callable from the first pair whose pred is True,
        or Tensors returned by ``default`` if no pred in ``pred_fn_pairs`` is True and ``default`` is not None,
        or Tensors returned by the last callable in ``pred_fn_pairs``  if no pred in ``pred_fn_pairs`` is True and ``default`` is None.

    Raises:
        TypeError: If the type of ``pred_fn_pairs`` is not list or tuple.
        TypeError: If the type of elements in ``pred_fn_pairs`` is not tuple.
        TypeError: If the size of tuples in ``pred_fn_pairs`` is not 2.
        TypeError: If the first element of 2-tuple in ``pred_fn_pairs`` is not a Tensor.
        TypeError: If the second element of 2-tuple in ``pred_fn_pairs`` is not callable.
        TypeError: If ``default`` is not None but it is not callable.

    Examples:
        .. code-block:: python

            import paddle

            paddle.enable_static()

            def fn_1():
                return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)

            def fn_2():
                return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)

            def fn_3():
                return paddle.full(shape=[3], dtype='int32', fill_value=3)

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()

            with paddle.static.program_guard(main_program, startup_program):
                x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
                y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
                z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)

                pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
                pred_3 = paddle.equal(x, y)      # false: 0.3 == 0.1

                # Call fn_1 because pred_1 is True
                out_1 = paddle.static.nn.case(
                    pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)

                # Argument default is None and no pred in pred_fn_pairs is True, so fn_3 will be called
                # because fn_3 is the last callable in pred_fn_pairs.
                out_2 = paddle.static.nn.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])

                exe = paddle.static.Executor(paddle.CPUPlace())
                res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
                print(res_1)  # [[1. 1.]]
                print(res_2)  # [3 3 3]
    '''
    helper = LayerHelper('case', **locals())

    def _case_check_args(pred_fn_pairs, default):
        '''
        Check arguments pred_fn_pairs and default. Return canonical pred_fn_pairs and default.
        '''
        check_type(pred_fn_pairs, 'pred_fn_pairs', (list, tuple), 'case')

        for pred_fn in pred_fn_pairs:
            if not isinstance(pred_fn, tuple):
                raise TypeError(
                    _error_message(
                        "The elements' type",
                        "pred_fn_pairs",
                        "case",
                        tuple,
                        type(pred_fn),
                    )
                )
            if len(pred_fn) != 2:
                raise TypeError(
                    _error_message(
                        "The tuple's size",
                        "pred_fn_pairs",
                        "case",
                        "2",
                        str(len(pred_fn)) + "-tuple",
                    )
                )
            pred, fn = pred_fn

            if not isinstance(pred, Variable):
                raise TypeError(
                    _error_message(
                        "The pred's type",
                        "pred_fn_pairs",
                        "case",
                        "boolean Variable",
                        type(pred),
                    )
                )

            if not callable(fn):
                raise TypeError(
                    "The fn for {} of pred_fn_pairs in Op(case) must"
                    " be callable.".format(pred.name)
                )

        if default is None:
            default_index = len(pred_fn_pairs) - 1  # pick the last one
            default = pred_fn_pairs[default_index][1]
            pred_fn_pairs = pred_fn_pairs[:default_index]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")

        return pred_fn_pairs, default

    pred_fn_pairs, default = _case_check_args(pred_fn_pairs, default)

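    # Chain the pairs back to front: each (pred, fn) wraps everything after it
    # as its false branch, so the nested cond ops behave like an
    # if-elif-...-else cascade.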
    false_fn = default
    for pred, true_fn in reversed(pred_fn_pairs):
        false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)

    final_fn = false_fn

    return final_fn()


class Switch:
    """
    :api_attr: Static Graph

    This class is used to implement the Switch branch control flow.
    A Switch branch contains several case branches and one default branch.
    Switch control flow checks the case branch conditions in turn,
    and only executes the statements after the first case branch whose condition is satisfied.
    If no case branch condition is satisfied,
    only the statement following the default branch is executed.

    Note:
        A new OP :ref:`api_fluid_layers_case` is highly recommended instead of ``Switch`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_case` is easier to use and is called with less code but does the same thing as ``Switch`` .

    Member Functions:
        case(condition): The case branch of Switch whose parameter cond is a scalar Variable of bool type. Only if the cond of the current case branch is True and the conds of all previous case branches are False will the statements after this case branch be executed; otherwise they will not be executed.

        default(): The default branch of Switch. When cond of all case branches is False, the statement after default branch is executed.

    Case and default functions can only be used inside the scope of Switch, as shown below:

    .. code-block:: python
2596

        '''
        with fluid.layers.Switch() as switch:
            with switch.case(cond1):
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
            with switch.case(cond2):
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
            with switch.default():
                i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
        '''

    Args:
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            lr = fluid.layers.create_global_var(
                shape=[1],
                value=0.0,
                dtype='float32',
                persistable=True,
                name="learning_rate")
            zero_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.0)
            one_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=1.0)
            two_var = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=2.0)

            global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)

            with fluid.layers.control_flow.Switch() as switch:
                with switch.case(global_step == zero_var):
                    fluid.layers.assign(input=one_var, output=lr)
                with switch.default():
                    fluid.layers.assign(input=two_var, output=lr)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[lr])
            print(res) # [array([1.], dtype=float32)]
    """

    def __init__(self, name=None):
        self.helper = LayerHelper('switch', name=name)
        self.inside_scope = False
        self.pre_not_conditions = []

    def case(self, condition):
        if not self.inside_scope:
            raise ValueError("case should be called inside with")

        check_variable_and_dtype(
            condition,
            'condition',
            ['bool'],
            'the member function case of fluid.layers.Switch',
        )
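
        # Predicate bookkeeping, as a sketch (assuming case conditions
        # c1, c2, ...): the k-th case block is guarded by
        # (not c1) and ... and (not c_{k-1}) and c_k, and default() is
        # guarded by the conjunction of all the negations, so at most one
        # branch ever executes.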
        if len(self.pre_not_conditions) == 0:
            cond_block = ConditionalBlock([condition], is_scalar_condition=True)
            not_cond = paddle.logical_not(x=condition)
            self.pre_not_conditions.append(not_cond)
        else:
            pre_cond_num = len(self.pre_not_conditions)
            pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
            new_not_cond = paddle.logical_and(
                x=pre_not_cond, y=paddle.logical_not(x=condition)
            )
            self.pre_not_conditions.append(new_not_cond)
            cond_block = ConditionalBlock(
                [paddle.logical_and(x=pre_not_cond, y=condition)],
                is_scalar_condition=True,
            )

        return ConditionalBlockGuard(cond_block)

    def default(self):
        pre_cond_num = len(self.pre_not_conditions)
        if pre_cond_num == 0:
            raise ValueError("there should be at least one condition")
        cond_block = ConditionalBlock(
            [self.pre_not_conditions[pre_cond_num - 1]],
            is_scalar_condition=True,
        )
        return ConditionalBlockGuard(cond_block)

    def __enter__(self):
        """
        Set the flag that we are now inside the Switch block.
        :return:
        """
        self.inside_scope = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.inside_scope = False
        if exc_type is not None:
            return False  # re-raise exception

        return True


class IfElseBlockGuard:
    def __init__(self, is_true, ifelse):
        if not isinstance(ifelse, IfElse):
            raise TypeError("ifelse must be an instance of IfElse class")

        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("You cannot invoke IfElse.block() inside a block")

        self.is_true = is_true
        self.ie = ifelse
        if is_true:
            self.cond_block = ifelse.conditional_true_block
        else:
            self.cond_block = ifelse.conditional_false_block

        if not isinstance(self.cond_block, ConditionalBlock):
            raise TypeError("Unexpected situation")

        self.cond_block = self.cond_block.block()

    def __enter__(self):
        self.ie.status = (
            IfElse.IN_IF_ELSE_TRUE_BLOCKS
            if self.is_true
            else IfElse.IN_IF_ELSE_FALSE_BLOCKS
        )
        self.cond_block.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
            # re-raise inside exception
            return False
        if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
            raise ValueError("Must set output inside block")
        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS


class IfElse:
    """
    :api_attr: Static Graph

    This class is used to implement the IfElse branch control function. IfElse contains two blocks, true_block and false_block. IfElse puts the data satisfying the True or False conditions into different blocks to run.

    Cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the execution conditions of the corresponding part of the input data.

    Note:
        A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` .

    The usage of IfElse is different from that of other OPs, which may confuse some users. Here is a simple example to illustrate this OP.

    .. code-block:: python

        # The following code implements: subtract 10 from the elements of x greater than 0, add 10 to the elements of x less than 0, and then sum all the elements.
        import numpy as np
        import paddle.fluid as fluid

        x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32', append_batch_size=False)
        y = fluid.layers.data(name='y', shape=[4, 1], dtype='float32', append_batch_size=False)

        x_d = np.array([[3], [1], [-2], [-3]]).astype(np.float32)
        y_d = np.zeros((4, 1)).astype(np.float32)

        # Compare x and y element-wise; the output cond is a 2-D tensor with shape [4, 1] and data type bool.
        # Based on the input data x_d, y_d, it can be inferred that the data in cond are [[true], [true], [false], [false]].
        cond = fluid.layers.greater_than(x, y)
        # Unlike other common OPs, the ie returned by the OP below is an IfElse OP object
        ie = fluid.layers.IfElse(cond)

        with ie.true_block():
            # In this block, select the entries of x whose cond value is True and subtract 10 from them.
            out_1 = ie.input(x)
            out_1 = out_1 - 10
            ie.output(out_1)
        with ie.false_block():
            # In this block, select the entries of x whose cond value is False and add 10 to them.
            out_1 = ie.input(x)
            out_1 = out_1 + 10
            ie.output(out_1)

        # According to the cond condition, the data processed in the two blocks are merged. The output here is output, the type is List, and the element type in the List is Variable.
        output = ie() #  [array([[-7.], [-9.], [ 8.], [ 7.]], dtype=float32)]

        # Get the first Variable in the output List and add all elements.
        out = paddle.sum(output[0])

        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())

        res = exe.run(fluid.default_main_program(), feed={"x":x_d, "y":y_d}, fetch_list=[out])
        print(res)
        # [array([-1.], dtype=float32)]

    Args:
        cond (Variable): cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the corresponding execution conditions of the N input data.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Unlike other common OPs, calling the OP returns an IfElse OP object (e.g. ie in the example), which branches the input data by calling the internal functions ``true_block()``, ``false_block()``, ``input()`` and ``output()`` of the object, and integrates the data processed by the different branches as the overall output by calling the object itself (``output = ie()``). The output type is a list, and the type of each element in the list is Variable.

    Internal Functions:
        The block is constructed by calling the ``with ie.true_block()`` function in the object, and the computational logic under condition true is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.

        The block is constructed by calling the ``with ie.false_block()`` function in the object, and the computational logic under condition false is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.

        ``out = ie.input(x)`` takes out the data of the corresponding conditional dimension in x and puts it into out, supporting multiple inputs being processed inside the blocks.

        ``ie.output(out)`` writes the result to the output of the corresponding condition.

        The object is callable: ``output = ie()`` merges all the outputs of the true and false blocks into the overall output; the output type is a list, and the type of each element in the list is Variable.

    """
    OUT_IF_ELSE_BLOCKS = 0
    IN_IF_ELSE_TRUE_BLOCKS = 1
    IN_IF_ELSE_FALSE_BLOCKS = 2

    def __init__(self, cond, name=None):
        check_type(cond, "cond", Variable, "fluid.layers.IfElse")
        check_type(name, "name", (str, type(None)), "fluid.layers.IfElse")
        self.helper = LayerHelper('ifelse', name=name)
        self.cond = cond
        self.input_table = {}
        self.status = IfElse.OUT_IF_ELSE_BLOCKS
        self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
        self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
        self.output_table = ([], [])  # (false_outs, true_outs)

    def input(self, x):
        if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("input must in true/false blocks")
        if id(x) not in self.input_table:
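            # Each distinct input is split only once; later input() calls on
            # the same variable reuse the cached (out_true, out_false) pair.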
            parent_block = self._parent_block()
            out_true = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key(
                    'ifelse_input' + self.helper.name
                ),
                dtype=x.dtype,
            )

            out_false = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key(
                    'ifelse_input' + self.helper.name
                ),
                dtype=x.dtype,
            )
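            # split_lod_tensor scatters the rows of x by the boolean mask in
            # self.cond: rows whose mask value is True go to out_true, the
            # rest go to out_false.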
            parent_block.append_op(
                type='split_lod_tensor',
                inputs={
                    'X': x,
                    'Mask': self.cond,
                },
                outputs={'OutTrue': out_true, 'OutFalse': out_false},
                attrs={'level': 0},
            )
            self.input_table[id(x)] = (out_true, out_false)
        else:
            out_true, out_false = self.input_table[id(x)]

        if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
            return out_true
        else:
            return out_false

    def _parent_block(self):
        current_block = self.helper.main_program.current_block()
        return self.helper.main_program.block(current_block.parent_idx)

    def true_block(self):
        return IfElseBlockGuard(True, self)

    def false_block(self):
        return IfElseBlockGuard(False, self)

    def output(self, *outs):
        if self.status == self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("output can only be invoked in the sub-block")

        out_table = self.output_table[
            1 if self.status == self.IN_IF_ELSE_TRUE_BLOCKS else 0
        ]
        parent_block = self._parent_block()
        for each_out in outs:
            check_type(
                each_out, "each output", Variable, "fluid.layers.IfElse.output"
            )
            # create outside tensor
            outside_out = parent_block.create_var(
                name=unique_name.generate_with_ignorable_key(
                    "_".join([self.helper.name, 'output'])
                ),
                dtype=each_out.dtype,
            )
            out_table.append(outside_out)

            # assign local var to outside
            assign(input=each_out, output=outside_out)

    def __call__(self):
        if self.status != self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("IfElse::__call__ must be out of sub-block")
        false_len, true_len = list(map(len, self.output_table))
        if false_len == 0 and true_len == 0:
            raise ValueError(
                "Must invoke true_block/false_block before " "__call__"
            )
        elif false_len != true_len and false_len != 0 and true_len != 0:
            raise ValueError("The output side must be same")
        elif false_len == 0 or true_len == 0:
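            # Only one branch produced outputs, so return that branch's
            # outputs as-is; there is nothing to merge.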
            return self.output_table[0 if false_len != 0 else 1]

        # else none of false_len/true_len is zero
        # merge together
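        # merge_lod_tensor is roughly the inverse of the split done in
        # input(): it recombines the per-branch results into one tensor,
        # with rows placed according to the boolean mask in self.cond.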
        rlist = []
        for false_var, true_var in zip(*self.output_table):
            rlist.append(
                merge_lod_tensor(
                    in_true=true_var,
                    in_false=false_var,
                    mask=self.cond,
                    x=self.cond,
                    level=0,
                )
            )
        return rlist


def switch_case(branch_index, branch_fns, default=None, name=None):
    '''
    :api_attr: Static Graph

    This operator is like a C++ switch/case statement.

    Args:
        branch_index(Tensor): A Tensor with shape [1] to specify which branch to execute. The data type is ``int32``, ``int64`` or ``uint8``.
        branch_fns(dict|list|tuple): If it's a list or tuple, the elements in it could be pairs of (int, callable) or simple callables whose actual index will be used as the index of callable. If it's a dict, its key is a python integer and the value is a callable. All callables return the same structure of Tensors.
        default(callable, optional): Callable that returns a structure of Tensors.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor|list(Tensor): Tensors returned by the callable specified by ``branch_index`` in ``branch_fns``,
        or Tensors returned by ``default`` if ``default`` is not None and no index matches in ``branch_fns``,
        or Tensors returned by the callable with the max index in ``branch_fns`` if ``default`` is None and no index matches in ``branch_fns``.

    Raises:
        TypeError: If the type of ``branch_index`` is not Tensor.
        TypeError: If the data type of ``branch_index`` is not ``int32``, ``int64`` or ``uint8``.
        TypeError: If the type of ``branch_fns`` is not dict, list or tuple.
        TypeError: If the elements of ``branch_fns`` are not 2-tuples.
        TypeError: If the first element of a 2-tuple in ``branch_fns`` is not an integer.
        ValueError: If the first elements of the 2-tuples in ``branch_fns`` are not unique.
        TypeError: If the second element of a 2-tuple in ``branch_fns`` is not callable.
        TypeError: If ``default`` is not None but it is not callable.

    Examples:
        .. code-block:: python

            import paddle

            paddle.enable_static()

            def fn_1():
                return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)

            def fn_2():
                return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)

            def fn_3():
                return paddle.full(shape=[3], dtype='int32', fill_value=3)

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()
            with paddle.static.program_guard(main_program, startup_program):
                index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
                index_2 = paddle.full(shape=[1], dtype='int32', fill_value=2)

                out_1 = paddle.static.nn.switch_case(
                    branch_index=index_1,
                    branch_fns={1: fn_1, 2: fn_2},
                    default=fn_3)

                out_2 = paddle.static.nn.switch_case(
                    branch_index=index_2,
                    branch_fns=[(1, fn_1), (2, fn_2)],
                    default=fn_3)

                # Argument default is None and no index matches. fn_3 will be called because of the max index 7.
                out_3 = paddle.static.nn.switch_case(
                    branch_index=index_2,
                    branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)])

                exe = paddle.static.Executor(paddle.CPUPlace())
                res_1, res_2, res_3 = exe.run(main_program, fetch_list=[out_1, out_2, out_3])
                print(res_1)  # [[1. 1.]]
                print(res_2)  # [[2 2] [2 2]]
                print(res_3)  # [3 3 3]
    '''
    helper = LayerHelper('switch_case', **locals())

    def _check_args(branch_index, branch_fns, default):

        check_variable_and_dtype(
            branch_index,
            'branch_index',
            ['uint8', 'int32', 'int64'],
            'switch_case',
        )

        if convert_dtype(branch_index.dtype) != "int64":
            branch_index = cast(branch_index, "int64")

        check_type(branch_fns, 'branch_fns', (list, tuple, dict), 'switch_case')

        branch_fns = (
            branch_fns.items() if isinstance(branch_fns, dict) else branch_fns
        )

        branch_fns = (
            list(enumerate(branch_fns))
            if all(callable(fn) for fn in branch_fns)
            else branch_fns
        )
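        # After the two normalizations above, branch_fns is a sequence of
        # (index, fn) pairs, whether the caller passed a dict, a list of
        # pairs, or a plain list of callables.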

        keys_of_fns = []
        for index_fn_pair in branch_fns:
            if not isinstance(index_fn_pair, tuple):
                raise TypeError(
                    _error_message(
                        "The elements' type",
                        "branch_fns",
                        "switch_case",
                        tuple,
                        type(branch_fns),
                    )
                )

            if len(index_fn_pair) != 2:
                raise TypeError(
                    _error_message(
                        "The tuple's size",
                        "branch_fns",
                        "switch_case",
                        "2",
                        str(len(index_fn_pair)) + "-tuple",
                    )
                )

            key, fn = index_fn_pair

            if not isinstance(key, int):
                raise TypeError(
                    _error_message(
                        "The key's type",
                        "branch_fns",
                        "switch_case",
                        int,
                        type(key),
                    )
                )

            if key in keys_of_fns:
                raise ValueError(
                    "The key in 'branch_fns' must be unique, but '{}' appears more than once.".format(
                        key
                    )
                )
            else:
                keys_of_fns.append(key)

            if not callable(fn):
                raise TypeError(
                    _error_message(
                        "The type of function for key {}".format(key),
                        "branch_fns",
                        "switch_case",
                        "callable",
                        type(fn),
                    )
                )

        if default is None:
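            # No default was given: treat the callable with the largest index
            # as the fallback and drop it from the dispatch list.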
            default = sorted(branch_fns)[-1][1]
            branch_fns = sorted(branch_fns)[:-1]
        elif not callable(default):
            raise TypeError("The default in Op(case) must be callable.")

        pred_fn_pairs = []
        for index, fn in branch_fns:
            new_index = fill_constant(shape=[1], dtype="int64", value=index)
            pred = paddle.equal(branch_index, new_index)
            pred_fn_pairs.append((pred, fn))

        return pred_fn_pairs, default

    pred_fn_pairs, default = _check_args(branch_index, branch_fns, default)
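    # As in case(), fold the (pred, fn) pairs into nested cond() calls. The
    # nesting order is irrelevant here, because each pred is an equality test
    # against a distinct index, so at most one of them can be True.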
    false_fn = default
    for pred, true_fn in pred_fn_pairs:
        false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)

    final_fn = false_fn
    return final_fn()


@templatedoc()
def reorder_lod_tensor_by_rank(x, rank_table):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}.
        rank_table(${rank_table_type}): ${rank_table_comment}.

    Returns:
        out(${out_type}): ${out_comment}.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data_desc = (['input', [9], 0], ['ref', [5], 1])
          data = fluid.layers.data(name=data_desc[0][0], shape=data_desc[0][1])
          rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1])
          table = fluid.layers.control_flow.lod_rank_table(rank_data)
          new_data = fluid.layers.reorder_lod_tensor_by_rank(
                           x=data, rank_table=table)

    """

    check_type(x, 'x', (Variable), 'reorder_lod_tensor_by_rank')
    check_type(
        rank_table, 'rank_table', (Variable), 'reorder_lod_tensor_by_rank'
    )
    if rank_table.type != core.VarDesc.VarType.LOD_RANK_TABLE:
        raise TypeError("The type of rank_table should be LOD_RANK_TABLE.")

    helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reorder_lod_tensor_by_rank',
        inputs={'X': [x], 'RankTable': [rank_table]},
        outputs={'Out': [out]},
    )
    return out


def is_empty(x, name=None):
    """

    Test whether a Tensor is empty.

    Args:
        x (Tensor): The Tensor to be tested.
        name (str, optional): The default value is ``None`` . Normally users
                            don't have to set this parameter. For more information,
                            please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A bool scalar Tensor. True if 'x' is an empty Tensor.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand(shape=[4, 32, 32], dtype='float32')
            res = paddle.is_empty(x=input)
            print("res:", res)
            # ('res:', Tensor: eager_tmp_1
            #    - place: CPUPlace
            #    - shape: [1]
            #    - layout: NCHW
            #    - dtype: bool
            #    - data: [0])

    """
    if in_dygraph_mode():
        return _C_ops.is_empty(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.is_empty(x)

    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
    )
    check_type(name, "name", (str, type(None)), "is_empty")

    helper = LayerHelper("is_empty", **locals())
    cond = helper.create_variable_for_type_inference(dtype='bool')
    cond.stop_gradient = True
    helper.append_op(
        type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]}
    )
    return cond