# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from functools import partial, reduce
import warnings


import paddle
from paddle.utils import deprecated
from . import nn
from . import tensor
from . import control_flow
from . import utils
from . import sequence_lod
from .utils import *
from .. import core
from ..framework import default_main_program
from ..data_feeder import convert_dtype
from ..layer_helper import LayerHelper
from ..framework import _non_static_mode
from ..param_attr import ParamAttr
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype

from collections.abc import Sequence

__all__ = [
    'RNNCell',
    'GRUCell',
    'LSTMCell',
    'rnn',
    'birnn',
    'dynamic_decode',
    'dynamic_lstm',
    'dynamic_lstmp',
    'dynamic_gru',
    'gru_unit',
    'lstm_unit',
    'lstm',
]


class RNNCell:
    """
        :api_attr: Static Graph

    RNNCell is the base class representing the abstraction of a calculation
    that maps the input and state to the output and new state. It is suitable
    for and mostly used in RNNs.
    """

    def call(self, inputs, states, **kwargs):
        r"""
        Every cell must implement this method to do the calculations mapping the
        inputs and states to the output and new states.

        To be more flexible, both inputs and states can be a tensor variable or
        a nested structure (list|tuple|namedtuple|dict) of tensor variable, that
        is, a (possibly nested structure of) tensor variable[s].

        Parameters:
            inputs: A (possibly nested structure of) tensor variable[s].
            states: A (possibly nested structure of) tensor variable[s].
            **kwargs: Additional keyword arguments, provided by the caller.

        Returns:
            tuple: outputs and new_states pair. outputs and new_states both \
                can be nested structure of tensor variables. new_states must \
                have the same structure with states.

        """
        raise NotImplementedError("RNNCell must implent the call function.")

    def __call__(self, inputs, states, **kwargs):
        return self.call(inputs, states, **kwargs)

    def get_initial_states(
        self,
        batch_ref,
        shape=None,
        dtype='float32',
        init_value=0,
        batch_dim_idx=0,
    ):
        r"""
        Generate initialized states according to provided shape, data type and
        value.

        Parameters:
            batch_ref: A (possibly nested structure of) tensor variable[s].
                The first dimension of the tensor will be used as batch size to
                initialize states.
            shape: A (possibly nested structure of) shape[s], where a shape is
                represented as a list/tuple of integers. -1 (for batch size) will
                be automatically inserted if shape does not start with it. If None,
                property `state_shape` will be used. The default value is None.
            dtype: A (possibly nested structure of) data type[s]. The structure
                must be the same as that of `shape`, except when all tensors in
                states have the same data type, in which case a single data type
                can be used. If property `cell.state_shape` is not available,
                float32 will be used as the data type. The default value is float32.
            init_value: A float value used to initialize states.
            batch_dim_idx: An integer indicating which dimension of the tensor in
                inputs represents batch size.  The default value is 0.
        Returns:
            Variable: tensor variable[s] packed in the same structure provided \
                by shape, representing the initialized states.
        """
        check_variable_and_dtype(
            batch_ref,
            'batch_ref',
            ['float32', 'float64', 'int32', 'int64'],
            'RNNCell',
        )
        check_type(shape, 'shape', (list, tuple, type(None), int), 'RNNCell')
        if isinstance(shape, (list, tuple)):
            shapes = map_structure(lambda x: x, shape)
            if isinstance(shape, list):
                for i, _shape in enumerate(shapes):
                    check_type(_shape, 'shapes[' + str(i) + ']', int, 'RNNCell')
            else:
                check_type(shapes, 'shapes', int, 'RNNCell')
        check_dtype(dtype, 'dtype', ['float32', 'float64'], 'RNNCell')

        # TODO: use inputs and batch_size
        batch_ref = flatten(batch_ref)[0]

        def _is_shape_sequence(seq):
            """For shape, list/tuple of integer is the finest-grained objection"""
            if isinstance(seq, list) or isinstance(seq, tuple):
                if reduce(
                    lambda flag, x: isinstance(x, int) and flag, seq, True
                ):
                    return False
            # TODO: Add a check for illegal inputs
            if isinstance(seq, dict):
                return True
            return isinstance(seq, Sequence) and not isinstance(seq, str)
        class Shape:
            def __init__(self, shape):
                self.shape = shape if shape[0] == -1 else ([-1] + list(shape))

        # nested structure of shapes
        states_shapes = self.state_shape if shape is None else shape
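        # Temporarily treat a flat list/tuple of ints as a single leaf shape so
        # that map_structure does not recurse into the individual dimensions.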
        is_sequence_ori = utils.is_sequence
        utils.is_sequence = _is_shape_sequence
        states_shapes = map_structure(lambda shape: Shape(shape), states_shapes)
        utils.is_sequence = is_sequence_ori

        # nested structure of dtypes
        try:
            states_dtypes = self.state_dtype if dtype is None else dtype
        except NotImplementedError:  # use fp32 as default
            states_dtypes = "float32"
        if len(flatten(states_dtypes)) == 1:
            dtype = flatten(states_dtypes)[0]
            states_dtypes = map_structure(lambda shape: dtype, states_shapes)

        init_states = map_structure(
            lambda shape, dtype: tensor.fill_constant_batch_size_like(
                input=batch_ref,
                shape=shape.shape,
                dtype=dtype,
                value=init_value,
                input_dim_idx=batch_dim_idx,
            ),
            states_shapes,
            states_dtypes,
        )
        return init_states

    @property
    def state_shape(self):
        """
        Abstract method (property).
        Used to initialize states.
        A (possibly nested structure of) shape[s], where a shape is represented
        as a list/tuple of integers (-1 for batch size would be automatically
        inserted into a shape if the shape does not start with it).
        Not necessary to be implemented if states are not initialized by
        `get_initial_states` or the `shape` argument is provided when using
        `get_initial_states`.
        """
        raise NotImplementedError(
            "Please add an implementation for `state_shape` in the used cell."
        )

    @property
    def state_dtype(self):
        """
        Abstract method (property).
        Used to initialize states.
        A (possibly nested structure of) data type[s]. The structure must be the
        same as that of `shape`, except when all tensors in states have the same
        data type, in which case a single data type can be used.
        Not necessary to be implemented if states are not initialized
        by `get_initial_states` or the `dtype` argument is provided when using
        `get_initial_states`.
        """
        raise NotImplementedError(
            "Please add an implementation for `state_dtype` in the used cell."
        )
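

# A minimal illustrative sketch: a custom cell only needs to implement `call`
# and, when `get_initial_states` is used without an explicit `shape`, the
# `state_shape` property. GRUCell and LSTMCell below follow the same pattern;
# the hidden size of 16 here is an arbitrary example value.
class _ExampleIdentityCell(RNNCell):
    """A toy example cell that passes its input through and keeps its state unchanged."""

    def call(self, inputs, states):
        return inputs, states

    @property
    def state_shape(self):
        # [16] becomes [-1, 16] once the batch dimension is inserted automatically
        return [16]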


class GRUCell(RNNCell):
    r"""
        :api_attr: Static Graph

    Gated Recurrent Unit cell. It is a wrapper for
    `fluid.contrib.layers.rnn_impl.BasicGRUUnit` to make it adapt to RNNCell.

    The formula used is as follows:

    .. math::

        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)

        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)

        \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)

        h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t}

    For more details, please refer to  `Learning Phrase Representations using
    RNN Encoder Decoder for Statistical Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_

    Examples:

        .. code-block:: python

            import paddle.fluid.layers as layers
            cell = layers.GRUCell(hidden_size=256)
    """

    def __init__(
        self,
        hidden_size,
        param_attr=None,
        bias_attr=None,
        gate_activation=None,
        activation=None,
        dtype="float32",
        name="GRUCell",
    ):
        """
        Constructor of GRUCell.

        Parameters:
            hidden_size (int): The hidden size in the GRU cell.
            param_attr(ParamAttr, optional): The parameter attribute for the learnable
                weight matrix. Default: None.
            bias_attr (ParamAttr, optional): The parameter attribute for the bias
                of GRU. Default: None.
            gate_activation (function, optional): The activation function for :math:`act_g`.
                Default: `fluid.layers.sigmoid`.
            activation (function, optional): The activation function for :math:`act_c`.
                Default: `fluid.layers.tanh`.
            dtype(string, optional): The data type used in this cell. Default float32.
            name(string, optional) : The name scope used to identify parameters and biases.
        """
        check_type(hidden_size, 'hidden_size', (int), 'GRUCell')
        check_dtype(dtype, 'dtype', ['float32', 'float64'], 'GRUCell')
        self.hidden_size = hidden_size
        from .. import contrib  # TODO: resolve circular import

        self.gru_unit = contrib.layers.rnn_impl.BasicGRUUnit(
            name,
            hidden_size,
            param_attr,
            bias_attr,
            gate_activation,
            activation,
            dtype,
        )

    def call(self, inputs, states):
        r"""
        Perform calculations of GRU.

        Parameters:
            inputs(Variable): A tensor with shape `[batch_size, input_size]`,
                corresponding to :math:`x_t` in the formula. The data type
                should be float32 or float64.
            states(Variable): A tensor with shape `[batch_size, hidden_size]`.
                corresponding to :math:`h_{t-1}` in the formula. The data type
                should be float32 or float64.

        Returns:
            tuple: A tuple( :code:`(outputs, new_states)` ), where `outputs` and \
                `new_states` are the same tensor shaped `[batch_size, hidden_size]`, \
                corresponding to :math:`h_t` in the formula. The data type of the \
                tensor is the same as that of `states`.
        """

        check_variable_and_dtype(
            inputs, 'inputs', ['float32', 'float64'], 'GRUCell'
        )
        check_variable_and_dtype(
            states, 'states', ['float32', 'float64'], 'GRUCell'
        )
        new_hidden = self.gru_unit(inputs, states)
        return new_hidden, new_hidden

    @property
    def state_shape(self):
        """
        The `state_shape` of GRUCell is a shape `[hidden_size]` (-1 for batch
        size would be automatically inserted into shape). The shape corresponds
        to :math:`h_{t-1}`.
        """
        return [self.hidden_size]


class LSTMCell(RNNCell):
    r"""
        :api_attr: Static Graph

    Long Short-Term Memory cell. It is a wrapper for
    `fluid.contrib.layers.rnn_impl.BasicLSTMUnit` to make it adapt to RNNCell.

    The formula used is as follows:

    .. math::

        i_{t} & = act_g(W_{x_{i}}x_{t} + W_{h_{i}}h_{t-1} + b_{i})

        f_{t} & = act_g(W_{x_{f}}x_{t} + W_{h_{f}}h_{t-1} + b_{f} + forget\_bias)

        c_{t} & = f_{t}c_{t-1} + i_{t} act_c (W_{x_{c}}x_{t} + W_{h_{c}}h_{t-1} + b_{c})

        o_{t} & = act_g(W_{x_{o}}x_{t} + W_{h_{o}}h_{t-1} + b_{o})

        h_{t} & = o_{t} act_c (c_{t})

    For more details, please refer to `RECURRENT NEURAL NETWORK REGULARIZATION <http://arxiv.org/abs/1409.2329>`_

    Examples:

        .. code-block:: python

            import paddle.fluid.layers as layers
            cell = layers.LSTMCell(hidden_size=256)
    """

    def __init__(
        self,
        hidden_size,
        param_attr=None,
        bias_attr=None,
        gate_activation=None,
        activation=None,
        forget_bias=1.0,
        dtype="float32",
        name="LSTMCell",
    ):
        """
        Constructor of LSTMCell.

        Parameters:
            hidden_size (int): The hidden size in the LSTM cell.
            param_attr(ParamAttr, optional): The parameter attribute for the learnable
                weight matrix. Default: None.
            bias_attr (ParamAttr, optional): The parameter attribute for the bias
                of LSTM. Default: None.
            gate_activation (function, optional): The activation function for :math:`act_g`.
                Default: 'fluid.layers.sigmoid'.
            activation (function, optional): The activation function for :math:`act_c`.
                Default: 'fluid.layers.tanh'.
            forget_bias(float, optional): forget bias used when computing forget gate.
                Default 1.0
            dtype(string, optional): The data type used in this cell. Default float32.
            name(string, optional) : The name scope used to identify parameters and biases.
        """

        check_type(hidden_size, 'hidden_size', (int), 'LSTMCell')
        check_dtype(dtype, 'dtype', ['float32', 'float64'], 'LSTMCell')
        self.hidden_size = hidden_size
        from .. import contrib  # TODO: resolve circular import

        self.lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit(
            name,
            hidden_size,
            param_attr,
            bias_attr,
            gate_activation,
            activation,
            forget_bias,
            dtype,
        )

    def call(self, inputs, states):
        r"""
        Perform calculations of LSTM.

        Parameters:
            inputs(Variable): A tensor with shape `[batch_size, input_size]`,
                corresponding to :math:`x_t` in the formula. The data type
                should be float32 or float64.
            states(Variable): A list containing two tensors, each shaped
                `[batch_size, hidden_size]`, corresponding to :math:`h_{t-1}, c_{t-1}`
                in the formula. The data type should be float32 or float64.

        Returns:
            tuple: A tuple( :code:`(outputs, new_states)` ), where `outputs` is \
                a tensor with shape `[batch_size, hidden_size]`, corresponding \
                to :math:`h_{t}` in the formula; `new_states` is a list containing \
                two tensor variables shaped `[batch_size, hidden_size]`, corresponding \
                to :math:`h_{t}, c_{t}` in the formula. The data type of these \
                tensors is the same as that of `states`.
        """

        check_variable_and_dtype(
            inputs, 'inputs', ['float32', 'float64'], 'LSTMCell'
        )
        check_type(states, 'states', list, 'LSTMCell')
        if isinstance(states, list):
            for i, state in enumerate(states):
                check_variable_and_dtype(
                    state,
                    'state[' + str(i) + ']',
                    ['float32', 'float64'],
                    'LSTMCell',
                )

        pre_hidden, pre_cell = states
        new_hidden, new_cell = self.lstm_unit(inputs, pre_hidden, pre_cell)
        return new_hidden, [new_hidden, new_cell]

    @property
    def state_shape(self):
        """
        The `state_shape` of LSTMCell is a list with two shapes: `[[hidden_size], [hidden_size]]`
        (-1 for batch size would be automatically inserted into shape). These two
        shapes correspond to :math:`h_{t-1}` and :math:`c_{t-1}` separately.
        """
        return [[self.hidden_size], [self.hidden_size]]


def rnn(
    cell,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    is_reverse=False,
    **kwargs
):
    """
    rnn creates a recurrent neural network specified by RNNCell `cell`,
    which performs :code:`cell.call()` (for dygraph mode :code:`cell.forward`)
    repeatedly until it reaches the maximum length of `inputs`.

    Arguments:
        cell(RNNCellBase): An instance of `RNNCellBase`.
        inputs(Tensor): the input sequences.
            If time_major is True, the shape is
            `[time_steps, batch_size, input_size]`
            else the shape is `[batch_size, time_steps, input_size]`.
        initial_states(Tensor|tuple|list, optional): the initial state of the
            rnn cell. Tensor or a possibly nested structure of tensors. If not
            provided, `cell.get_initial_states` would be called to produce
            the initial state. Defaults to None.
        sequence_length (Tensor, optional): shape `[batch_size]`, dtype: int64
            or int32. The valid lengths of input sequences. Defaults to None.
            If `sequence_length` is not None, the inputs are treated as
            padded sequences. In each input sequence, elements whose time step
            index are not less than the valid length are treated as paddings.
        time_major (bool): Whether the first dimension of the input means the
            time steps. Defaults to False.
        is_reverse (bool, optional): Indicate whether to calculate in the reverse
            order of input sequences. Defaults to False.
        **kwargs: Additional keyword arguments to pass to `forward` of the cell.

    Returns:
        (outputs, final_states)
        outputs (Tensor|list|tuple): the output sequence. Tensor or nested
            structure of Tensors.
            If `time_major` is True, the shape of each tensor in outputs is
            `[time_steps, batch_size, hidden_size]`, else
            `[batch_size, time_steps, hidden_size]`.
        final_states (Tensor|list|tuple): final states. A (possibly nested structure of)
            tensor[s], representing the final state for RNN. It has the same
            structure as the initial states. Each tensor in final states has the same
            shape and dtype as the corresponding tensor in initial states.


    Examples:

        .. code-block:: python

            import paddle
            paddle.disable_static()

            cell = paddle.nn.SimpleRNNCell(16, 32)

            inputs = paddle.rand((4, 23, 16))
            prev_h = paddle.randn((4, 32))
            outputs, final_states = paddle.fluid.layers.rnn(cell, inputs, prev_h)

    """
    if _non_static_mode():
        return _rnn_dynamic_graph(
            cell,
            inputs,
            initial_states,
            sequence_length,
            time_major,
            is_reverse,
            **kwargs
        )
    else:
        return _rnn_static_graph(
            cell,
            inputs,
            initial_states,
            sequence_length,
            time_major,
            is_reverse,
            **kwargs
        )


class ArrayWrapper:
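    """Accumulates one tensor per time step; keeping a mutable list lets map_structure append to the same structure across iterations."""
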
    def __init__(self, x):
        self.array = [x]

    def append(self, x):
        self.array.append(x)
        return self

    def __getitem__(self, item):
        return self.array.__getitem__(item)


def _maybe_copy(state, new_state, step_mask):
    """update rnn state or just pass the old state through"""
    new_state = nn.elementwise_mul(
        new_state, step_mask, axis=0
    ) + nn.elementwise_mul(state, (1 - step_mask), axis=0)
    return new_state


def _transpose_batch_time(x):
    perm = [1, 0] + list(range(2, len(x.shape)))
    return paddle.transpose(x, perm)


def _rnn_dynamic_graph(
    cell,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    is_reverse=False,
    **kwargs
):
    time_step_index = 0 if time_major else 1
    flat_inputs = flatten(inputs)
    time_steps = flat_inputs[0].shape[time_step_index]

    if initial_states is None:
        initial_states = cell.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )

    if not time_major:
        inputs = map_structure(_transpose_batch_time, inputs)

    if sequence_length is not None:
        mask = sequence_lod.sequence_mask(
            sequence_length, maxlen=time_steps, dtype=inputs.dtype
        )
        mask = paddle.transpose(mask, [1, 0])

    if is_reverse:
        inputs = map_structure(lambda x: paddle.reverse(x, axis=[0]), inputs)
        mask = (
            paddle.reverse(mask, axis=[0])
            if sequence_length is not None
            else None
        )

    states = initial_states
    outputs = []
    for i in range(time_steps):
        step_inputs = map_structure(lambda x: x[i], inputs)
        step_outputs, new_states = cell(step_inputs, states, **kwargs)
        if sequence_length is not None:
            new_states = map_structure(
                partial(_maybe_copy, step_mask=mask[i]), states, new_states
            )
        states = new_states
        outputs = (
            map_structure(lambda x: ArrayWrapper(x), step_outputs)
            if i == 0
            else map_structure(
                lambda x, x_array: x_array.append(x), step_outputs, outputs
            )
        )

    final_outputs = map_structure(
        lambda x: paddle.stack(x.array, axis=time_step_index), outputs
    )

    if is_reverse:
        final_outputs = map_structure(
            lambda x: paddle.reverse(x, axis=time_step_index), final_outputs
        )

    final_states = new_states
    return final_outputs, final_states


def _rnn_static_graph(
    cell,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    is_reverse=False,
    **kwargs
):
    check_type(inputs, 'inputs', (Variable, list, tuple), 'rnn')
    if isinstance(inputs, (list, tuple)):
        for i, input_x in enumerate(inputs):
            check_variable_and_dtype(
                input_x, 'inputs[' + str(i) + ']', ['float32', 'float64'], 'rnn'
            )
    check_type(
        initial_states,
        'initial_states',
        (Variable, list, tuple, type(None)),
        'rnn',
    )

    check_type(
        sequence_length, 'sequence_length', (Variable, type(None)), 'rnn'
    )

    def _switch_grad(x, stop=False):
        x.stop_gradient = stop
        return x

    if initial_states is None:
        initial_states = cell.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )
    initial_states = map_structure(_switch_grad, initial_states)

    if not time_major:
        inputs = map_structure(_transpose_batch_time, inputs)

    if sequence_length:
        max_seq_len = paddle.shape(flatten(inputs)[0])[0]
        mask = sequence_lod.sequence_mask(
            sequence_length,
            maxlen=max_seq_len,
            dtype=flatten(initial_states)[0].dtype,
        )
        mask = paddle.transpose(mask, [1, 0])
    if is_reverse:
        inputs = map_structure(lambda x: paddle.reverse(x, axis=[0]), inputs)
        mask = paddle.reverse(mask, axis=[0]) if sequence_length else None

    # StaticRNN
    rnn = control_flow.StaticRNN()
    with rnn.step():
        inputs = map_structure(rnn.step_input, inputs)
        states = map_structure(rnn.memory, initial_states)
        copy_states = map_structure(lambda x: x, states)
        outputs, new_states = cell(inputs, copy_states, **kwargs)
        assert_same_structure(states, new_states)
        if sequence_length:
            step_mask = rnn.step_input(mask)
            new_states = map_structure(
                partial(_maybe_copy, step_mask=step_mask), states, new_states
            )

        map_structure(rnn.update_memory, states, new_states)
        flat_outputs = flatten(outputs)
        map_structure(rnn.step_output, outputs)
        map_structure(rnn.step_output, new_states)

    rnn_out = rnn()
    final_outputs = rnn_out[: len(flat_outputs)]
    final_outputs = pack_sequence_as(outputs, final_outputs)
    final_states = map_structure(lambda x: x[-1], rnn_out[len(flat_outputs) :])
    final_states = pack_sequence_as(new_states, final_states)

    if is_reverse:
        final_outputs = map_structure(
            lambda x: paddle.reverse(x, axis=[0]), final_outputs
        )

    if not time_major:
        final_outputs = map_structure(_transpose_batch_time, final_outputs)

    return (final_outputs, final_states)


def birnn(
    cell_fw,
    cell_bw,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    **kwargs
):
    """
    birnn creates a bidirectional recurrent neural network specified by
    RNNCell `cell_fw` and `cell_bw`, which performs :code:`cell.call()`
    (for dygraph mode :code:`cell.forward`) repeatedly until it reaches
    the maximum length of `inputs` and then concatenates the outputs of both RNNs
    along the last axis.

    Arguments:
        cell_fw(RNNCellBase): An instance of `RNNCellBase`.
        cell_bw(RNNCellBase): An instance of `RNNCellBase`.
        inputs(Tensor): the input sequences.
            If time_major is True, the shape is
            `[time_steps, batch_size, input_size]`
            else the shape is `[batch_size, time_steps, input_size]`.
        initial_states(tuple, optional): A tuple of initial states of
            `cell_fw` and `cell_bw`.
            If not provided, `cell.get_initial_states` would be called to
            produce initial state for each cell. Defaults to None.
        sequence_length (Tensor, optional): shape `[batch_size]`, dtype: int64
            or int32. The valid lengths of input sequences. Defaults to None.
            If `sequence_length` is not None, the inputs are treated as
            padded sequences. In each input sequence, elements whose time step
            index are not less than the valid length are treated as paddings.
        time_major (bool): Whether the first dimension of the input means the
            time steps. Defaults to False.
        **kwargs: Additional keyword arguments to pass to `forward` of each cell.

    Returns:
        (outputs, final_states)
        outputs (Tensor): the outputs of the bidirectional RNN. It is the
            concatenation of the outputs from the forward RNN and backward
            RNN along the last axis.
            If time major is True, the shape is `[time_steps, batch_size, size]`,
            else the shape is `[batch_size, time_steps, size]`, where size is
            `cell_fw.hidden_size + cell_bw.hidden_size`.
        final_states (tuple): A tuple of the final states of the forward
            cell and backward cell.

    Examples:

        .. code-block:: python

            import paddle
            paddle.disable_static()

            cell_fw = paddle.nn.LSTMCell(16, 32)
            cell_bw = paddle.nn.LSTMCell(16, 32)

            inputs = paddle.rand((4, 23, 16))
            hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32))
            hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32))
            initial_states = ((hf, cf), (hb, cb))
            outputs, final_states = paddle.fluid.layers.birnn(
                cell_fw, cell_bw, inputs, initial_states)

    """
    if initial_states is None:
        states_fw = cell_fw.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )
        states_bw = cell_fw.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )
    else:
        states_fw, states_bw = initial_states
    outputs_fw, states_fw = rnn(
        cell_fw,
        inputs,
        states_fw,
        sequence_length,
        time_major=time_major,
        **kwargs
    )

    outputs_bw, states_bw = rnn(
        cell_bw,
        inputs,
        states_bw,
        sequence_length,
        time_major=time_major,
        is_reverse=True,
        **kwargs
    )

    outputs = map_structure(
        lambda x, y: tensor.concat([x, y], -1), outputs_fw, outputs_bw
    )

    final_states = (states_fw, states_bw)
    return outputs, final_states


def _dynamic_decode_imperative(
    decoder,
    inits=None,
    max_step_num=None,
    output_time_major=False,
    impute_finished=False,
    is_test=False,
    return_length=False,
    **kwargs
):
    def _maybe_copy(state, new_state, step_mask):
        # TODO: use where_op
        state_dtype = state.dtype
        if convert_dtype(state_dtype) in ["bool"]:
            state = tensor.cast(state, dtype="float32")
            new_state = tensor.cast(new_state, dtype="float32")
        if step_mask.dtype != state.dtype:
            step_mask = tensor.cast(step_mask, dtype=state.dtype)
            # otherwise the renamed bool gradients would be summed up, leading
            # to a sum(bool) error.
            step_mask.stop_gradient = True
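        # Equivalent to new_state = mask * state + (1 - mask) * new_state: entries
        # whose mask (the `finished` flag here) is 1 keep their previous state.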
        new_state = nn.elementwise_mul(
            state, step_mask, axis=0
        ) - nn.elementwise_mul(new_state, (step_mask - 1), axis=0)
        if convert_dtype(state_dtype) in ["bool"]:
            new_state = tensor.cast(new_state, dtype=state_dtype)
        return new_state

    initial_inputs, initial_states, initial_finished = decoder.initialize(inits)
    inputs, states, finished = (
        initial_inputs,
        initial_states,
        initial_finished,
    )
    cond = paddle.logical_not((paddle.all(initial_finished)))
    sequence_lengths = tensor.cast(paddle.zeros_like(initial_finished), "int64")
    outputs = None

    step_idx = 0
    step_idx_tensor = tensor.fill_constant(
        shape=[1], dtype="int64", value=step_idx
    )
    while cond.numpy():
        (step_outputs, next_states, next_inputs, next_finished) = decoder.step(
            step_idx_tensor, inputs, states, **kwargs
        )
        if not decoder.tracks_own_finished:
            # BeamSearchDecoder tracks its own finished status, since beams
            # would be reordered and the finished status of each entry might
            # change. Otherwise, perform a logical OR, which does not change
            # entries that are already finished.
            next_finished = paddle.logical_or(next_finished, finished)
            # Keep states.finished/finished consistent with next_finished.
            tensor.assign(next_finished, finished)
            next_sequence_lengths = paddle.add(
                sequence_lengths,
                tensor.cast(
                    paddle.logical_not(finished), sequence_lengths.dtype
                ),
            )
            if impute_finished:  # rectify the states for the finished.
                next_states = map_structure(
                    lambda x, y: _maybe_copy(x, y, finished),
                    states,
                    next_states,
                )
        else:
            warnings.warn(
                "`next_states` has no `lengths` attribute, the returned `sequence_lengths` would be all zeros."
            ) if not hasattr(next_states, "lengths") else None
            next_sequence_lengths = getattr(
                next_states, "lengths", sequence_lengths
            )

        outputs = (
            map_structure(lambda x: ArrayWrapper(x), step_outputs)
            if step_idx == 0
            else map_structure(
                lambda x, x_array: x_array.append(x), step_outputs, outputs
            )
        )
        inputs, states, finished, sequence_lengths = (
            next_inputs,
            next_states,
            next_finished,
            next_sequence_lengths,
        )

        control_flow.increment(x=step_idx_tensor, value=1.0, in_place=True)
        step_idx += 1

        cond = paddle.logical_not(paddle.all(finished))
        if max_step_num is not None and step_idx > max_step_num:
            break

    final_outputs = map_structure(
        lambda x: paddle.stack(x.array, axis=0), outputs
    )
    final_states = states

    try:
        final_outputs, final_states = decoder.finalize(
            final_outputs, final_states, sequence_lengths
        )
    except NotImplementedError:
        pass

    if not output_time_major:
        final_outputs = map_structure(
            lambda x: paddle.transpose(
                x, [1, 0] + list(range(2, len(x.shape)))
            ),
            final_outputs,
        )

    return (
        (final_outputs, final_states, sequence_lengths)
        if return_length
        else (final_outputs, final_states)
    )


def _dynamic_decode_declarative(
    decoder,
    inits=None,
    max_step_num=None,
    output_time_major=False,
    impute_finished=False,
    is_test=False,
    return_length=False,
    **kwargs
):
    initial_inputs, initial_states, initial_finished = decoder.initialize(inits)
    global_inputs, global_states, global_finished = (
        initial_inputs,
        initial_states,
        initial_finished,
    )
    global_finished.stop_gradient = True
    step_idx = tensor.fill_constant(shape=[1], dtype="int64", value=0)

    cond = paddle.logical_not((paddle.all(initial_finished)))
    if max_step_num is not None:
        max_step_num = tensor.fill_constant(
            shape=[1], dtype="int64", value=max_step_num
        )
    while_op = paddle.static.nn.control_flow.While(cond, is_test=is_test)

    sequence_lengths = tensor.cast(paddle.zeros_like(initial_finished), "int64")
    sequence_lengths.stop_gradient = True

    if is_test:
        # for test, reuse inputs and states variables to save memory
        inputs = map_structure(lambda x: x, initial_inputs)
        states = map_structure(lambda x: x, initial_states)
    else:
        # inputs and states of all steps must be saved for backward and training
        inputs_arrays = map_structure(
            lambda x: control_flow.array_write(x, step_idx), initial_inputs
        )
        states_arrays = map_structure(
            lambda x: control_flow.array_write(x, step_idx), initial_states
        )

    def _maybe_copy(state, new_state, step_mask):
        # TODO: use where_op
        state_dtype = state.dtype
        if convert_dtype(state_dtype) in ["bool"]:
            state = tensor.cast(state, dtype="float32")
            new_state = tensor.cast(new_state, dtype="float32")
        if step_mask.dtype != state.dtype:
            step_mask = tensor.cast(step_mask, dtype=state.dtype)
            # otherwise the renamed bool gradients would be summed up, leading
            # to a sum(bool) error.
            step_mask.stop_gradient = True
        new_state = nn.elementwise_mul(
            state, step_mask, axis=0
        ) - nn.elementwise_mul(new_state, (step_mask - 1), axis=0)
        if convert_dtype(state_dtype) in ["bool"]:
            new_state = tensor.cast(new_state, dtype=state_dtype)
        return new_state

    def _transpose_batch_time(x):
        return paddle.transpose(x, [1, 0] + list(range(2, len(x.shape))))

    def _create_array_out_of_while(dtype):
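        # Temporarily point the program at the parent (outer) block so the tensor
        # array is created outside the while loop body and survives across steps.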
        current_block_idx = default_main_program().current_block_idx
        default_main_program().current_block_idx = (
            default_main_program().current_block().parent_idx
        )
        tensor_array = paddle.tensor.create_array(dtype)
        default_main_program().current_block_idx = current_block_idx
        return tensor_array

    # While
    with while_op.block():
        if not is_test:
            inputs = map_structure(
                lambda array: control_flow.array_read(array, step_idx),
                inputs_arrays,
            )
            states = map_structure(
                lambda array: control_flow.array_read(array, step_idx),
                states_arrays,
            )
        (outputs, next_states, next_inputs, next_finished) = decoder.step(
            step_idx, inputs, states, **kwargs
        )
        if not decoder.tracks_own_finished:
            # BeamSearchDecoder tracks its own finished status, since beams would
            # be reordered and the finished status of each entry might change.
            # Otherwise, perform a logical OR, which does not change entries that
            # are already finished.
            next_finished = paddle.logical_or(next_finished, global_finished)
            next_sequence_lengths = paddle.add(
                sequence_lengths,
                tensor.cast(
                    paddle.logical_not(global_finished),
                    sequence_lengths.dtype,
                ),
            )
            if impute_finished:  # rectify the states for the finished.
                next_states = map_structure(
                    lambda x, y: _maybe_copy(x, y, global_finished),
                    states,
                    next_states,
                )
        else:
            warnings.warn(
                "`next_states` has no `lengths` attribute, the returned `sequence_lengths` would be all zeros."
            ) if not hasattr(next_states, "lengths") else None
            next_sequence_lengths = getattr(
                next_states, "lengths", sequence_lengths
            )

        # create tensor arrays in the global block once the dtype[s] of outputs are known
        outputs_arrays = map_structure(
            lambda x: _create_array_out_of_while(x.dtype), outputs
        )

        map_structure(
            lambda x, x_array: control_flow.array_write(
                x, i=step_idx, array=x_array
            ),
            outputs,
            outputs_arrays,
        )
        control_flow.increment(x=step_idx, value=1.0, in_place=True)
        # update global_finished first, since it might also be part of the
        # decoder's states; otherwise a stale finished status would be written
        # to the array
        tensor.assign(next_finished, global_finished)
        tensor.assign(next_sequence_lengths, sequence_lengths)
        if is_test:
            map_structure(tensor.assign, next_inputs, global_inputs)
            map_structure(tensor.assign, next_states, global_states)
        else:
            map_structure(
                lambda x, x_array: control_flow.array_write(
                    x, i=step_idx, array=x_array
                ),
                next_inputs,
                inputs_arrays,
            )
            map_structure(
                lambda x, x_array: control_flow.array_write(
                    x, i=step_idx, array=x_array
                ),
                next_states,
                states_arrays,
            )
        if max_step_num is not None:
            paddle.logical_and(
                paddle.logical_not(paddle.all(global_finished)),
                paddle.less_equal(step_idx, max_step_num),
                cond,
            )
        else:
            paddle.logical_not(paddle.all(global_finished), cond)

    final_outputs = map_structure(
        lambda array: tensor.tensor_array_to_tensor(
            array, axis=0, use_stack=True
        )[0],
        outputs_arrays,
    )
    if is_test:
        final_states = global_states
    else:
        final_states = map_structure(
            lambda array: control_flow.array_read(array, step_idx),
            states_arrays,
        )

    try:
        final_outputs, final_states = decoder.finalize(
            final_outputs, final_states, sequence_lengths
        )
    except NotImplementedError:
        pass

    if not output_time_major:
        final_outputs = map_structure(_transpose_batch_time, final_outputs)

    return (
        (final_outputs, final_states, sequence_lengths)
        if return_length
        else (final_outputs, final_states)
    )


def dynamic_decode(
    decoder,
    inits=None,
    max_step_num=None,
    output_time_major=False,
    impute_finished=False,
    is_test=False,
    return_length=False,
    **kwargs
):
    r"""
    Dynamic decoding performs :code:`decoder.step()` repeatedly until the returned
    Tensor indicating finished status contains all True values or the number of
    decoding steps reaches :attr:`max_step_num`.

    :code:`decoder.initialize()` would be called once before the decoding loop.
    If the `decoder` has implemented `finalize` method, :code:`decoder.finalize()`
    would be called once after the decoding loop.

    Parameters:
        decoder(Decoder): An instance of `Decoder`.
        inits(object, optional): Argument passed to `decoder.initialize`.
            Default `None`.
        max_step_num(int, optional): The maximum number of steps. If not provided,
            decode until the decoder is fully done, or in other words, the returned
            Tensor by :code:`decoder.step()` indicating finished status contains
            all True. Default `None`.
        output_time_major(bool, optional): Indicate the data layout of Tensor included
            in the final outputs(the first returned value of this method). If
            attr:`False`, the data layout would be batch major with shape
            `[batch_size, seq_len, ...]`.  If attr:`True`, the data layout would
            be time major with shape `[seq_len, batch_size, ...]`. Default: `False`.
        impute_finished(bool, optional): If `True` and `decoder.tracks_own_finished`
            is False, then states get copied through for batch entries which are
            marked as finished, while unfinished entries use the new states returned
            by :code:`decoder.step()`; this ensures that the final states have the
            correct values. Otherwise, states would not be copied through when
            finished. If the returned `final_states` is needed, it should be set to
            True, which causes some slowdown. Default `False`.
        is_test(bool, optional): A flag indicating whether to use test mode. In
            test mode, less memory is used. Default `False`.
        return_length(bool, optional):  A flag indicating whether to return an
            extra Tensor variable in the output tuple, which stores the actual
            lengths of all decoded sequences. Default `False`.
        **kwargs: Additional keyword arguments. Arguments passed to `decoder.step`.

    Returns:

        - final_outputs (Tensor, nested structure of Tensor), each Tensor in :code:`final_outputs` is the stacked result of all decoding steps' outputs, which might be revised
            by :code:`decoder.finalize()` if the decoder has implemented finalize.
            And :code:`final_outputs` has the same structure and data types as the :code:`outputs`
            returned by :code:`decoder.step()`

        - final_states (Tensor, nested structure of Tensor), :code:`final_states` is the counterpart at last time step of initial states \
            returned by :code:`decoder.initialize()` , and thus has the same structure,
            with tensors of the same shapes and data types.

        - sequence_lengths (Tensor), stores the actual lengths of all decoded sequences.
            sequence_lengths is provided only if :code:`return_length` is True.

    Examples:

        .. code-block:: python

            import paddle
            from paddle.nn import BeamSearchDecoder, dynamic_decode
            from paddle.nn import GRUCell, Linear, Embedding
            trg_embeder = Embedding(100, 32)
            output_layer = Linear(32, 32)
            decoder_cell = GRUCell(input_size=32, hidden_size=32)
            decoder = BeamSearchDecoder(decoder_cell,
                                        start_token=0,
                                        end_token=1,
                                        beam_size=4,
                                        embedding_fn=trg_embeder,
                                        output_fn=output_layer)
            encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
            outputs = dynamic_decode(decoder=decoder,
                                    inits=decoder_cell.get_initial_states(encoder_output),
                                    max_step_num=10)
    """
    if _non_static_mode():
        return _dynamic_decode_imperative(
            decoder,
            inits,
            max_step_num,
            output_time_major,
            impute_finished,
            is_test,
            return_length,
            **kwargs
        )
    else:
        return _dynamic_decode_declarative(
            decoder,
            inits,
            max_step_num,
            output_time_major,
            impute_finished,
            is_test,
            return_length,
            **kwargs
        )


def dynamic_lstm(
    input,
    size,
    h_0=None,
    c_0=None,
    param_attr=None,
    bias_attr=None,
    use_peepholes=True,
    is_reverse=False,
    gate_activation='sigmoid',
    cell_activation='tanh',
    candidate_activation='tanh',
    dtype='float32',
    name=None,
):
    r"""
        :api_attr: Static Graph

    **Note**:
        1. This OP only supports LoDTensor as inputs. If you need to deal with Tensor, please use :ref:`api_fluid_layers_lstm` .
        2. In order to improve efficiency, users must first map the input of dimension [T, hidden_size] to input of [T, 4 * hidden_size], and then pass it to this OP.

    The implementation of this OP include diagonal/peephole connections.
    Please refer to `Gers, F. A., & Schmidhuber, J. (2000) <ftp://ftp.idsia.ch/pub/juergen/TimeCount-IJCNN2000.pdf>`_ .
    If you do not need peephole connections, please set use_peepholes to False .

    This OP computes each timestep as follows:

    .. math::
      i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_{x_i} + b_{h_i})
    .. math::
      f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_{x_f} + b_{h_f})
    .. math::
      o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_{x_o} + b_{h_o})
    .. math::
      \widetilde{c_t} = tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_{x_c} + b_{h_c})
    .. math::
      c_t = f_t \odot c_{t-1} + i_t \odot \widetilde{c_t}
    .. math::
      h_t = o_t \odot tanh(c_t)

    The symbolic meanings in the formula are as follows:

    - :math:`x_{t}` represents the input at timestep :math:`t`
    - :math:`h_{t}` represents the hidden state at timestep :math:`t`
    - :math:`h_{t-1}, c_{t-1}` represent the hidden state and cell state at timestep :math:`t-1` , respectively
    - :math:`\widetilde{c_t}` represents the candidate cell state
    - :math:`i_t` , :math:`f_t` and :math:`o_t` represent input gate, forget gate, output gate, respectively
    - :math:`W` represents weight (e.g., :math:`W_{ix}` is the weight of a linear transformation of input :math:`x_{t}` when calculating input gate :math:`i_t` )
    - :math:`b` represents bias (e.g., :math:`b_{i}` is the bias of input gate)
    - :math:`\sigma` represents nonlinear activation function for gate, default sigmoid
    - :math:`\odot` represents the Hadamard product of a matrix, i.e. multiplying the elements of the same position for two matrices with the same dimension to get another matrix with the same dimension

    Parameters:
        input ( :ref:`api_guide_Variable_en` ): LSTM input tensor, multi-dimensional LODTensor of shape :math:`[T, 4*hidden\_size]` . Data type is float32 or float64.
        size (int): must be 4 * hidden_size.
        h_0( :ref:`api_guide_Variable_en` , optional): The initial hidden state of the LSTM, multi-dimensional Tensor of shape :math:`[batch\_size, hidden\_size]` .
                       Data type is float32 or float64. If set to None, it will be a vector of all 0. Default: None.
        c_0( :ref:`api_guide_Variable_en` , optional): The initial cell state of the LSTM, multi-dimensional Tensor of shape :math:`[batch\_size, hidden\_size]` .
                       Data type is float32 or float64. If set to None, it will be a vector of all 0. `h_0` and `c_0` can be None but only at the same time. Default: None.
        param_attr(ParamAttr, optional): Parameter attribute of weight. If it is None, the default weight parameter attribute is used. Please refer to :ref:`api_fluid_ParamAttr` .
                              If the user needs to set this parameter, the dimension must be :math:`[hidden\_size, 4*hidden\_size]` . Default: None.

                              - Weights = :math:`\{ W_{cr},W_{ir},W_{fr},W_{or} \}` , the shape is [hidden_size, 4*hidden_size].

        bias_attr (ParamAttr, optional): The bias attribute for the learnable bias
                              weights, which contains two parts, input-hidden
                              bias weights and peephole connections weights if
                              setting `use_peepholes` to `True`.
                              Please refer to :ref:`api_fluid_ParamAttr` . Default: None.

                              1. `use_peepholes = False`
                                 - Biases = {:math:`b_c, b_i, b_f, b_o`}.
                                 - The shape is [1, 4*hidden_size].
                              2. `use_peepholes = True`
                                 - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \
                                                 W_{fc}, W_{oc}`}.
                                 - The shape is [1, 7*hidden_size].

        use_peepholes (bool, optional): Whether to use peephole connection or not. Default: True.
        is_reverse (bool, optional): Whether to calculate reverse LSTM. Default: False.
        gate_activation (str, optional): The activation for input gate, forget gate and output gate. Default: "sigmoid".
        cell_activation (str, optional): The activation for cell output. Default: "tanh".
        candidate_activation (str, optional): The activation for candidate hidden state. Default: "tanh".
        dtype (str, optional): Data type, can be "float32" or "float64". Default: "float32".
        name (str, optional): A name for this layer. Please refer to :ref:`api_guide_Name` . Default: None.

    Returns:
        tuple ( :ref:`api_guide_Variable` , :ref:`api_guide_Variable` ) :

            The hidden state and cell state of LSTM

                - hidden: LoDTensor with shape of :math:`[T, hidden\_size]` , and its lod and dtype is the same as the input.
                - cell: LoDTensor with shape of :math:`[T, hidden\_size]` , and its lod and dtype is the same as the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            emb_dim = 256
            vocab_size = 10000
            hidden_dim = 512

            data = fluid.data(name='x', shape=[None], dtype='int64', lod_level=1)
            emb = fluid.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True)

            forward_proj = fluid.layers.fc(input=emb, size=hidden_dim * 4,
                                           bias_attr=False)

            forward, cell = fluid.layers.dynamic_lstm(
                input=forward_proj, size=hidden_dim * 4, use_peepholes=False)
            forward.shape  # (-1, 512)
            cell.shape  # (-1, 512)
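
            # Illustrative sketch: a reverse-direction LSTM over the same projected
            # input; forward and backward features can then be concatenated per time
            # step (a separate projection is often used for the backward direction).
            backward, backward_cell = fluid.layers.dynamic_lstm(
                input=forward_proj, size=hidden_dim * 4,
                use_peepholes=False, is_reverse=True)
            bi_feature = fluid.layers.concat([forward, backward], axis=1)
            bi_feature.shape  # (-1, 1024)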
    """
    assert (
        _non_static_mode() is not True
    ), "please use lstm instead of dynamic_lstm in dygraph mode!"
    assert (
        bias_attr is not False
    ), "bias_attr should not be False in dynamic_lstm."

    check_variable_and_dtype(
        input, 'input', ['float32', 'float64'], 'dynamic_lstm'
    )

    check_type(h_0, 'h_0', (Variable, type(None)), 'dynamic_lstm')
    if isinstance(h_0, Variable):
        check_variable_and_dtype(
            h_0, 'h_0', ['float32', 'float64'], 'dynamic_lstm'
        )

    check_type(c_0, 'c_0', (Variable, type(None)), 'dynamic_lstm')
    if isinstance(c_0, Variable):
        check_variable_and_dtype(
            c_0, 'c_0', ['float32', 'float64'], 'dynamic_lstm'
        )

    helper = LayerHelper('lstm', **locals())
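    # `size` is 4 * hidden_size (one slice per gate); recover the per-gate hidden
    # size before creating the [hidden_size, 4 * hidden_size] weight parameter.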
    size = size // 4
    weight = helper.create_parameter(
        attr=helper.param_attr, shape=[size, 4 * size], dtype=dtype
    )
    bias_size = [1, 7 * size]
    if not use_peepholes:
        bias_size[1] = 4 * size
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True
    )

    hidden = helper.create_variable_for_type_inference(dtype)
    cell = helper.create_variable_for_type_inference(dtype)
    batch_gate = helper.create_variable_for_type_inference(dtype)
    batch_cell_pre_act = helper.create_variable_for_type_inference(dtype)
    inputs = {'Input': input, 'Weight': weight, 'Bias': bias}
    batch_size = input.shape[0]
    if h_0:
        assert h_0.shape == (batch_size, size), (
            'The shape of h0 should be (batch_size, %d)' % size
        )
        inputs['H0'] = h_0
    if c_0:
        assert c_0.shape == (batch_size, size), (
            'The shape of c0 should be (batch_size, %d)' % size
        )
        inputs['C0'] = c_0

    helper.append_op(
        type='lstm',
        inputs=inputs,
        outputs={
            'Hidden': hidden,
            'Cell': cell,
            'BatchGate': batch_gate,
            'BatchCellPreAct': batch_cell_pre_act,
        },
        attrs={
            'use_peepholes': use_peepholes,
            'is_reverse': is_reverse,
            'gate_activation': gate_activation,
            'cell_activation': cell_activation,
            'candidate_activation': candidate_activation,
        },
    )
    return hidden, cell


@deprecated(
    since='2.0.0',
    update_to='paddle.nn.LSTM',
    reason="This API may occur CUDNN errors.",
)
def lstm(
    input,
    init_h,
    init_c,
    max_len,
    hidden_size,
    num_layers,
    dropout_prob=0.0,
    is_bidirec=False,
    is_test=False,
    name=None,
    default_initializer=None,
    seed=-1,
):
    r"""
	:api_attr: Static Graph

    **Note**:
        This OP only supports running on GPU devices.

    This OP implements LSTM operation - `Hochreiter, S., & Schmidhuber, J. (1997) <http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf>`_ .

    The implementation of this OP does not include diagonal/peephole connections.
    Please refer to `Gers, F. A., & Schmidhuber, J. (2000) <ftp://ftp.idsia.ch/pub/juergen/TimeCount-IJCNN2000.pdf>`_ .
    If you need peephole connections, please use :ref:`api_fluid_layers_dynamic_lstm` .

    This OP computes each timestep as follows:

    .. math::
      i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_{x_i} + b_{h_i})
    .. math::
      f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_{x_f} + b_{h_f})
    .. math::
      o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_{x_o} + b_{h_o})
    .. math::
      \widetilde{c_t} = tanh(W_{cx}x_t + W_{ch}h_{t-1} + b{x_c} + b_{h_c})
    .. math::
      c_t = f_t \odot c_{t-1} + i_t \odot \widetilde{c_t}
    .. math::
      h_t = o_t \odot tanh(c_t)

    The symbolic meanings in the formula are as follows:

    - :math:`x_{t}` represents the input at timestep :math:`t`
    - :math:`h_{t}` represents the hidden state at timestep :math:`t`
    - :math:`h_{t-1}, c_{t-1}` represent the hidden state and cell state at timestep :math:`t-1` , respectively
    - :math:`\widetilde{c_t}` represents the candidate cell state
    - :math:`i_t` , :math:`f_t` and :math:`o_t` represent input gate, forget gate, output gate, respectively
    - :math:`W` represents weight (e.g., :math:`W_{ix}` is the weight of a linear transformation of input :math:`x_{t}` when calculating input gate :math:`i_t` )
    - :math:`b` represents bias (e.g., :math:`b_{i}` is the bias of input gate)
    - :math:`\sigma` represents nonlinear activation function for gate, default sigmoid
    - :math:`\odot` represents the Hadamard product of a matrix, i.e. multiplying the elements of the same position for two matrices with the same dimension to get another matrix with the same dimension

    Parameters:
        input ( :ref:`api_guide_Variable_en` ): LSTM input tensor, 3-D Tensor of shape :math:`[batch\_size, seq\_len, input\_dim]` . Data type is float32 or float64
        init_h( :ref:`api_guide_Variable_en` ): The initial hidden state of the LSTM, 3-D Tensor of shape :math:`[num\_layers, batch\_size, hidden\_size]` .
                       If is_bidirec = True, shape should be :math:`[num\_layers*2, batch\_size, hidden\_size]` . Data type is float32 or float64.
        max_len (int): This parameter has no effect and will be discarded.
        init_c( :ref:`api_guide_Variable_en` ): The initial cell state of the LSTM, 3-D Tensor of shape :math:`[num\_layers, batch\_size, hidden\_size]` .
                       If is_bidirec = True, shape should be :math:`[num\_layers*2, batch\_size, hidden\_size]` . Data type is float32 or float64.
        hidden_size (int): hidden size of the LSTM.
        num_layers (int): total layers number of the LSTM.
        dropout_prob(float, optional): dropout prob, dropout ONLY work between rnn layers, NOT between time steps
                             There is NO dropout work on rnn output of the last RNN layers.
                             Default: 0.0.
        is_bidirec (bool, optional): If it is bidirectional. Default: False.
        is_test (bool, optional): Whether it is in test phase. Default: False.
        name (str, optional): A name for this layer. If set None, the layer
                         will be named automatically. Default: None.
        default_initializer(Initializer, optional): The initializer used to initialize the weight.
                         If set to None, the default initializer will be used. Default: None.
        seed(int, optional): Seed for dropout in LSTM. If it is -1, dropout will use a random seed. Default: -1.


    Returns:
        tuple ( :ref:`api_guide_Variable_en` , :ref:`api_guide_Variable_en` , :ref:`api_guide_Variable_en` ) :

                        Three tensors, rnn_out, last_h, last_c:

                        - rnn_out is result of LSTM hidden, shape is :math:`[seq\_len, batch\_size, hidden\_size]` \
                          if is_bidirec set to True, shape will be :math:`[seq\_len, batch\_size, hidden\_size*2]`
                        - last_h is the hidden state of the last step of LSTM \
                          shape is :math:`[num\_layers, batch\_size, hidden\_size]` \
                          if is_bidirec set to True, shape will be :math:`[num\_layers*2, batch\_size, hidden\_size]`
                        - last_c(Tensor): the cell state of the last step of LSTM \
                          shape is :math:`[num\_layers, batch\_size, hidden\_size]` \
                          if is_bidirec set to True, shape will be :math:`[num\_layers*2, batch\_size, hidden\_size]`


    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            paddle.enable_static()

            emb_dim = 256
            vocab_size = 10000
            data = fluid.data(name='x', shape=[None, 100], dtype='int64')
            emb = fluid.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True)
            batch_size = 100
            dropout_prob = 0.2
            input_size = 100
            hidden_size = 150
            num_layers = 1
            max_len = 12
            init_h = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
            init_c = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
            rnn_out, last_h, last_c = layers.lstm( emb, init_h, init_c, \
                    max_len, hidden_size, num_layers, \
                    dropout_prob=dropout_prob)
            rnn_out.shape  # (-1, 100, 150)
            last_h.shape  # (1, 100, 150)
            last_c.shape  # (1, 100, 150)
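
            # Illustrative sketch: a bidirectional variant reuses the placeholders
            # above and only changes the initial-state shapes and is_bidirec.
            init_h_bi = layers.fill_constant(
                [num_layers * 2, batch_size, hidden_size], 'float32', 0.0)
            init_c_bi = layers.fill_constant(
                [num_layers * 2, batch_size, hidden_size], 'float32', 0.0)
            bi_out, bi_h, bi_c = layers.lstm(emb, init_h_bi, init_c_bi,
                    max_len, hidden_size, num_layers,
                    dropout_prob=dropout_prob, is_bidirec=True)
            bi_out.shape  # (-1, 100, 300)
            bi_h.shape  # (2, 100, 150)
            bi_c.shape  # (2, 100, 150)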
    """

    helper = LayerHelper('cudnn_lstm', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'lstm')
    check_variable_and_dtype(init_h, 'init_h', ['float32', 'float64'], 'lstm')
    check_variable_and_dtype(init_c, 'init_c', ['float32', 'float64'], 'lstm')
    check_type(max_len, 'max_len', (int), 'lstm')
    check_type(hidden_size, 'hidden_size', (int), 'lstm')
    check_type(num_layers, 'num_layers', (int), 'lstm')
    dtype = input.dtype
    input_shape = list(input.shape)
    input_size = input_shape[-1]
    weight_size = 0
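    # cuDNN packs all LSTM parameters into one flat buffer: per layer and direction,
    # the input-to-hidden and hidden-to-hidden weights of the four gates plus their
    # 8 bias vectors of length hidden_size. The loop below sums that total length.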
    num_direction = 2 if is_bidirec else 1

    for i in range(num_layers):
        if i == 0:
            input_weight_size = (input_size * hidden_size) * 4 * num_direction
        else:
            input_weight_size = (hidden_size * hidden_size) * 4 * num_direction
        hidden_weight_size = (hidden_size * hidden_size) * 4 * num_direction

        weight_size += input_weight_size + hidden_weight_size
        weight_size += hidden_size * 8 * num_direction

    weight = helper.create_parameter(
        attr=helper.param_attr,
        shape=[weight_size],
        dtype=dtype,
        default_initializer=default_initializer,
    )

    out = helper.create_variable_for_type_inference(dtype)
    last_h = helper.create_variable_for_type_inference(dtype)
    last_c = helper.create_variable_for_type_inference(dtype)
    reserve = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.UINT8, stop_gradient=True
    )
    state_out = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.UINT8, stop_gradient=True
    )
    state_out.persistable = True

    helper.append_op(
        type='cudnn_lstm',
        inputs={
            'Input': input,
            'InitH': init_h,
            'InitC': init_c,
            'W': weight,
        },
        outputs={
            'Out': out,
            'LastH': last_h,
            'LastC': last_c,
            'Reserve': reserve,
            'StateOut': state_out,
        },
        attrs={
            'is_bidirec': is_bidirec,
            'input_size': input_size,
            'hidden_size': hidden_size,
            'num_layers': num_layers,
            'is_test': is_test,
            'dropout_prob': dropout_prob,
            'seed': seed,
        },
    )
    return out, last_h, last_c


def dynamic_lstmp(
    input,
    size,
    proj_size,
    param_attr=None,
    bias_attr=None,
    use_peepholes=True,
    is_reverse=False,
    gate_activation='sigmoid',
    cell_activation='tanh',
    candidate_activation='tanh',
    proj_activation='tanh',
    dtype='float32',
    name=None,
    h_0=None,
    c_0=None,
    cell_clip=None,
    proj_clip=None,
):
    r"""
	:api_attr: Static Graph

    **Note**:
        In order to improve efficiency, users must first map the input of dimension [T, hidden_size] to input of [T, 4 * hidden_size], and then pass it to this OP.

    This OP implements the LSTMP (LSTM Projected) layer.
    The LSTMP layer has a separate linear mapping layer behind the LSTM layer. -- `Sak, H., Senior, A., & Beaufays, F. (2014) <https://ai.google/research/pubs/pub43905.pdf>`_ .

    Compared with the standard LSTM layer, LSTMP has an additional linear mapping layer,
    which is used to map from the original hidden state :math:`h_t` to the lower dimensional state :math:`r_t` .
    This reduces the total number of parameters and computational complexity, especially when the output unit is relatively large.

    The default implementation of the OP contains diagonal/peephole connections,
    please refer to `Gers, F. A., & Schmidhuber, J. (2000) <ftp://ftp.idsia.ch/pub/juergen/TimeCount-IJCNN2000.pdf>`_ .
    If you need to disable the peephole connections, set use_peepholes to False.

    This OP computes each timestep as follows:

    .. math::
      i_t = \sigma(W_{ix}x_{t} + W_{ir}r_{t-1} + W_{ic}c_{t-1} + b_i)
    .. math::
          f_t = \sigma(W_{fx}x_{t} + W_{fr}r_{t-1} + W_{fc}c_{t-1} + b_f)
    .. math::
          o_t = \sigma(W_{ox}x_{t} + W_{or}r_{t-1} + W_{oc}c_{t-1} + b_o)
    .. math::
          \widetilde{c_t} = act_g(W_{cx}x_t + W_{cr}r_{t-1} + b_c)
    .. math::
          c_t = f_t \odot c_{t-1} + i_t \odot \widetilde{c_t}
    .. math::
          h_t = o_t \odot act_h(c_t)
    .. math::
          r_t = \overline{act_h}(W_{rh}h_t)

    The symbolic meanings in the formula are as follows:

    - :math:`x_{t}` represents the input at timestep :math:`t`
    - :math:`h_{t}` represents the hidden state at timestep :math:`t`
    - :math:`r_{t}` : represents the state of the projected output of the hidden state :math:`h_{t}`
    - :math:`h_{t-1}, c_{t-1}, r_{t-1}` represent the hidden state, cell state and projected output at timestep :math:`t-1` , respectively
    - :math:`\widetilde{c_t}` represents the candidate cell state
    - :math:`i_t` , :math:`f_t` and :math:`o_t` represent input gate, forget gate, output gate, respectively
    - :math:`W` represents weight (e.g., :math:`W_{ix}` is the weight of a linear transformation of input :math:`x_{t}` when calculating input gate :math:`i_t` )
    - :math:`b` represents bias (e.g., :math:`b_{i}` is the bias of input gate)
    - :math:`\sigma` represents nonlinear activation function for gate, default sigmoid
    - :math:`\odot` represents the Hadamard product of a matrix, i.e. multiplying the elements of the same position for two matrices with the same dimension to get another matrix with the same dimension

    Parameters:
        input( :ref:`api_guide_Variable_en` ): The input of dynamic_lstmp layer, which supports
                         variable-time length input sequence.
                         It is a multi-dimensional LODTensor of shape :math:`[T, 4*hidden\_size]` . Data type is float32 or float64.
        size(int): must be 4 * hidden_size.
        proj_size(int): The size of projection output.
        param_attr(ParamAttr, optional): Parameter attribute of weight. If it is None, the default weight parameter attribute is used. Please refer to :ref:`api_fluid_ParamAttr` .
                              If the user needs to set this parameter, the dimension must be :math:`[hidden\_size, 4*hidden\_size]` . Default: None.

                              - Weights = :math:`\{ W_{cr},W_{ir},W_{fr},W_{or} \}` , the shape is [P, 4*hidden_size] , where P is the projection size.
                              - Projection weight  = :math:`\{ W_{rh} \}` , the shape is [hidden_size, P].

        bias_attr (ParamAttr, optional): The bias attribute for the learnable bias
                              weights, which contains two parts, input-hidden
                              bias weights and peephole connections weights if
                              setting `use_peepholes` to `True`.
                              Please refer to :ref:`api_fluid_ParamAttr` . Default: None.

                              1. `use_peepholes = False`
                                 - Biases = {:math:`b_c, b_i, b_f, b_o`}.
                                 - The shape is [1, 4*hidden_size].
                              2. `use_peepholes = True`
                                 - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \
                                                 W_{fc}, W_{oc}`}.
                                 - The shape is [1, 7*hidden_size].

        use_peepholes (bool, optional): Whether to use peephole connection or not. Default True.
        is_reverse (bool, optional): Whether to calculate reverse LSTM. Default False.
        gate_activation (str, optional): The activation for input gate, forget gate and output gate. Default "sigmoid".
        cell_activation (str, optional): The activation for cell output. Default "tanh".
        candidate_activation (str, optional): The activation for candidate hidden state. Default "tanh".
        proj_activation(str, optional): The activation for projection output. Default "tanh".
        dtype (str, optional): Data type, can be "float32" or "float64". Default "float32".
        name (str, optional): A name for this layer. Please refer to :ref:`api_guide_Name` . Default: None.
        h_0( :ref:`api_guide_Variable` , optional): The initial hidden state is an optional input, default is zero.
                       This is a tensor with shape :math:`[batch\_size, P]` , where P is the projection size. Default: None.
        c_0( :ref:`api_guide_Variable` , optional): The initial cell state is an optional input, default is zero.
                       This is a tensor with shape :math:`[batch\_size, P]` , where P is the projection size.
                       `h_0` and `c_0` can be None but only at the same time. Default: None.
        cell_clip(float, optional): If not None, the cell state is clipped
                             by this value prior to the cell output activation. Default: None.
        proj_clip(float, optional): If `proj_size > 0` and `proj_clip` is
                            provided, then the projected values are clipped elementwise to within
                            `[-proj_clip, proj_clip]`. Default: None.

    Returns:
        tuple ( :ref:`api_guide_Variable` , :ref:`api_guide_Variable` ) :

                The hidden state and cell state of LSTMP

                - hidden: LoDTensor with shape of :math:`[T, P]` , and its lod and dtype is the same as the input.
                - cell: LoDTensor with shape of :math:`[T, hidden\_size]` , and its lod and dtype is the same as the input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            dict_dim, emb_dim = 128, 64
            data = fluid.data(name='sequence', shape=[None], dtype='int64', lod_level=1)
            emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])
            hidden_dim, proj_dim = 512, 256
            fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4,
                                    act=None, bias_attr=None)
            proj_out, last_c = fluid.layers.dynamic_lstmp(input=fc_out,
                                                    size=hidden_dim * 4,
                                                    proj_size=proj_dim,
                                                    use_peepholes=False,
                                                    is_reverse=True,
                                                    cell_activation="tanh",
                                                    proj_activation="tanh")
            proj_out.shape  # (-1, 256)
            last_c.shape  # (-1, 512)
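
            # Illustrative sketch: the same layer with cell / projection clipping,
            # reusing fc_out, hidden_dim and proj_dim from the example above.
            clip_proj, clip_cell = fluid.layers.dynamic_lstmp(input=fc_out,
                                                    size=hidden_dim * 4,
                                                    proj_size=proj_dim,
                                                    cell_clip=3.0,
                                                    proj_clip=3.0)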
    """

    assert (
        _non_static_mode() is not True
    ), "please use lstm instead of dynamic_lstmp in dygraph mode!"

    assert (
        bias_attr is not False
    ), "bias_attr should not be False in dynamic_lstmp."

    check_variable_and_dtype(
        input, 'input', ['float32', 'float64'], 'dynamic_lstmp'
    )

    check_type(h_0, 'h_0', (Variable, type(None)), 'dynamic_lstmp')
    if isinstance(h_0, Variable):
        check_variable_and_dtype(
            h_0, 'h_0', ['float32', 'float64'], 'dynamic_lstmp'
        )

    check_type(c_0, 'c_0', (Variable, type(None)), 'dynamic_lstmp')
    if isinstance(c_0, Variable):
        check_variable_and_dtype(
            c_0, 'c_0', ['float32', 'float64'], 'dynamic_lstmp'
        )

    helper = LayerHelper('lstmp', **locals())
    size = size // 4
    weight = helper.create_parameter(
        attr=helper.param_attr, shape=[proj_size, 4 * size], dtype=dtype
    )
    proj_weight = helper.create_parameter(
        attr=helper.param_attr, shape=[size, proj_size], dtype=dtype
    )
    bias_size = [1, 7 * size]
    if not use_peepholes:
        bias_size[1] = 4 * size
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True
    )

    projection = helper.create_variable_for_type_inference(dtype)
    cell = helper.create_variable_for_type_inference(dtype)
    ordered_proj0 = helper.create_variable_for_type_inference(dtype)
    batch_hidden = helper.create_variable_for_type_inference(dtype)
    batch_gate = helper.create_variable_for_type_inference(dtype)
    batch_cell_pre_act = helper.create_variable_for_type_inference(dtype)
    inputs = {
        'Input': input,
        'Weight': weight,
        'ProjWeight': proj_weight,
        'Bias': bias,
    }
    batch_size = input.shape[0]
    if h_0:
        assert h_0.shape == (batch_size, proj_size), (
            'The shape of h0 should be (batch_size, %d)' % proj_size
        )
        inputs['H0'] = h_0
    if c_0:
        assert c_0.shape == (batch_size, size), (
            'The shape of c0 should be (batch_size, %d)' % size
        )
        inputs['C0'] = c_0

    if cell_clip:
        assert cell_clip >= 0, "cell_clip should not be negative."
    if proj_clip:
        assert proj_clip >= 0, "proj_clip should not be negative."

    helper.append_op(
        type='lstmp',
        inputs=inputs,
        outputs={
            'Projection': projection,
            'Cell': cell,
            'BatchHidden': batch_hidden,
            'BatchGate': batch_gate,
            'BatchCellPreAct': batch_cell_pre_act,
        },
        attrs={
            'use_peepholes': use_peepholes,
            'cell_clip': cell_clip,
            'proj_clip': proj_clip,
            'is_reverse': is_reverse,
            'gate_activation': gate_activation,
            'cell_activation': cell_activation,
            'candidate_activation': candidate_activation,
            'proj_activation': proj_activation,
        },
    )
    return projection, cell


def dynamic_gru(
    input,
    size,
    param_attr=None,
    bias_attr=None,
    is_reverse=False,
    gate_activation='sigmoid',
    candidate_activation='tanh',
    h_0=None,
    origin_mode=False,
):
    r"""
	:api_attr: Static Graph

    **Note: The input of this OP must be LoDTensor. If the input to be
    processed is a Tensor, use** :ref:`api_fluid_layers_StaticRNN` .

    This operator is used to perform the calculations for a single layer of
    Gated Recurrent Unit (GRU) on full sequences step by step. The calculations
    in one time step support these two modes:

    If ``origin_mode`` is True, then the formula used is from paper
    `Learning Phrase Representations using RNN Encoder Decoder for Statistical
    Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_ .

    .. math::

        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)

        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)

        \\tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)

        h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \\tilde{h_t}


    if ``origin_mode`` is False, then the formula used is from paper
    `Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
    Modeling  <https://arxiv.org/pdf/1412.3555.pdf>`_

    .. math::

        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)

        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)

        \\tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)

        h_t & = (1-u_t) \odot h_{t-1} + u_t \odot \\tilde{h_t}

    :math:`x_t` is the input of the current time step, but it is not taken directly from ``input`` .
    This operator does not include the calculations :math:`W_{ux}x_{t}, W_{rx}x_{t}, W_{cx}x_{t}` .
    **Note** that a fully-connected layer whose size is 3 times ``size`` should therefore
    be applied before this operator, and its output should be used as ``input`` here.
    :math:`h_{t-1}` is the hidden state from previous time step.
    :math:`u_t` , :math:`r_t` , :math:`\\tilde{h_t}` and :math:`h_t` stand for
    update gate, reset gate, candidate hidden and hidden output separately.
    :math:`W_{uh}, b_u` , :math:`W_{rh}, b_r` and :math:`W_{ch}, b_c` stand for
    the weight matrix and bias used in update gate, reset gate, candidate hidden
    calculations. For implementation, the three weight matrices are merged into a
    tensor shaped :math:`[D, D \\times 3]` , and the three biases are concatenated as
    a tensor shaped :math:`[1, D \\times 3]` , where :math:`D` stands for the
    hidden size; The data layout of weight tensor is: :math:`W_{uh}` and :math:`W_{rh}`
    are concatenated with shape :math:`[D, D  \\times 2]` lying on the first part,
    and :math:`W_{ch}` lying on the latter part with shape :math:`[D, D]` .


    Args:
        input(Variable): A LoDTensor whose lod level is 1, representing the input
            after linear projection. Its shape should be :math:`[T, D \\times 3]` ,
            where :math:`T` stands for the total sequence lengths in this mini-batch,
            :math:`D` for the hidden size. The data type should be float32 or float64.
        size(int): Indicate the hidden size.
        param_attr(ParamAttr, optional):  To specify the weight parameter property.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr, optional): To specify the bias parameter property.
            Default: None, which means the default bias parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        is_reverse(bool, optional): Whether to compute in the reversed order of
            input sequences. Default False.
        gate_activation(str, optional): The activation function corresponding to
            :math:`act_g` in the formula. "sigmoid", "tanh", "relu" and "identity"
            are supported. Default "sigmoid".
        candidate_activation(str, optional): The activation function corresponding to
            :math:`act_c` in the formula. "sigmoid", "tanh", "relu" and "identity"
            are supported. Default "tanh".
        h_0 (Variable, optional): A Tensor representing the initial hidden state.
            If not provided, the default initial hidden state is 0. The shape is
            :math:`[N, D]` , where :math:`N` is the number of sequences in the
            mini-batch, :math:`D` for the hidden size. The data type should be
            same as ``input`` . Default None.

    Returns:
        Variable: A LoDTensor whose lod level is 1 and shape is :math:`[T, D]` , \
            where :math:`T` stands for the total sequence lengths in this mini-batch \
            :math:`D` for the hidden size. It represents GRU transformed sequence output, \
            and has the same lod and data type with ``input`` .

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            dict_dim, emb_dim = 128, 64
            data = fluid.data(name='sequence',
                      shape=[None],
                      dtype='int64',
                      lod_level=1)
            emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])
            hidden_dim = 512
            x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
            hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
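
            # Illustrative sketch: a bidirectional encoder can be assembled by
            # running a second GRU with is_reverse=True and concatenating the
            # outputs; x and hidden_dim reuse the example above.
            hidden_rev = fluid.layers.dynamic_gru(input=x, size=hidden_dim,
                                                  is_reverse=True)
            bi_hidden = fluid.layers.concat([hidden, hidden_rev], axis=1)  # [T, 2 * hidden_dim]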
    """

    assert (
        _non_static_mode() is not True
    ), "please use gru instead of dynamic_gru in dygraph mode!"

    check_variable_and_dtype(
        input, 'input', ['float32', 'float64'], 'dynamic_gru'
    )

    check_type(h_0, 'h_0', (Variable, type(None)), 'dynamic_gru')
    if isinstance(h_0, Variable):
        check_variable_and_dtype(
            h_0, 'h_0', ['float32', 'float64'], 'dynamic_gru'
        )

    helper = LayerHelper('gru', **locals())
    dtype = helper.input_dtype()

    weight = helper.create_parameter(
        attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype
    )
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=[1, 3 * size], dtype=dtype, is_bias=True
    )
    batch_size = input.shape[0]
    inputs = {'Input': input, 'Weight': weight, 'Bias': bias}
    if h_0:
        assert h_0.shape == (batch_size, size), (
            'The shape of h0 should be (batch_size, %d)' % size
        )
        inputs['H0'] = h_0

    hidden = helper.create_variable_for_type_inference(dtype)
    batch_gate = helper.create_variable_for_type_inference(dtype)
    batch_reset_hidden_prev = helper.create_variable_for_type_inference(dtype)
    batch_hidden = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type='gru',
        inputs=inputs,
        outputs={
            'Hidden': hidden,
            'BatchGate': batch_gate,
            'BatchResetHiddenPrev': batch_reset_hidden_prev,
            'BatchHidden': batch_hidden,
        },
        attrs={
            'is_reverse': is_reverse,
            'gate_activation': gate_activation,
            'activation': candidate_activation,
            'origin_mode': origin_mode,
        },
    )
    return hidden


def gru_unit(
    input,
    hidden,
    size,
    param_attr=None,
    bias_attr=None,
    activation='tanh',
    gate_activation='sigmoid',
    origin_mode=False,
):
    r"""
	:api_attr: Static Graph

    Gated Recurrent Unit (GRU) RNN cell. This operator performs GRU calculations for
    one time step and it supports these two modes:

    If ``origin_mode`` is True, then the formula used is from paper
    `Learning Phrase Representations using RNN Encoder Decoder for Statistical
    Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_ .

    .. math::

        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)

        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)

        \\tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)

        h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \\tilde{h_t}


    if ``origin_mode`` is False, then the formula used is from paper
    `Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
    Modeling  <https://arxiv.org/pdf/1412.3555.pdf>`_

    .. math::

        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)

        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)

        \\tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)

        h_t & = (1-u_t) \odot h_{t-1} + u_t \odot \\tilde{h_t}

    :math:`x_t` is the input of the current time step, but it is not ``input`` itself.
    This operator does not include the calculations :math:`W_{ux}x_{t}, W_{rx}x_{t}, W_{cx}x_{t}` .
    **Note** that a fully-connected layer whose size is 3 times the GRU hidden size should therefore
    be applied before this operator, and its output should be used as ``input`` here.
    :math:`h_{t-1}` is the hidden state from previous time step.
    :math:`u_t` , :math:`r_t` , :math:`\\tilde{h_t}` and :math:`h_t` stand for
    update gate, reset gate, candidate hidden and hidden output separately.
    :math:`W_{uh}, b_u` , :math:`W_{rh}, b_r` and :math:`W_{ch}, b_c` stand for
    the weight matrix and bias used in update gate, reset gate, candidate hidden
    calculations. For implementation, the three weight matrices are merged into a
    tensor shaped :math:`[D, D \\times 3]` , and the three biases are concatenated as
    a tensor shaped :math:`[1, D \\times 3]` , where :math:`D` stands for the
    hidden size; The data layout of weight tensor is: :math:`W_{uh}` and :math:`W_{rh}`
    are concatenated with shape :math:`[D, D  \\times 2]` lying on the first part,
    and :math:`W_{ch}` lying on the latter part with shape :math:`[D, D]` .


    Args:
        input(Variable): A 2D Tensor representing the input after linear projection.
            Its shape should be :math:`[N, D \\times 3]` ,
            where :math:`N` stands for batch size, :math:`D` for the hidden size.
            The data type should be float32 or float64.
        hidden(Variable): A 2D Tensor representing the hidden state from previous step.
            Its shape should be :math:`[N, D]` , where :math:`N` stands for batch size,
            :math:`D` for the hidden size. The data type should be same as ``input`` .
        size(int): Indicate the hidden size.
        param_attr(ParamAttr, optional):  To specify the weight parameter property.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr, optional): To specify the bias parameter property.
            Default: None, which means the default bias parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        activation(str, optional): The activation function corresponding to
            :math:`act_c` in the formula. "sigmoid", "tanh", "relu" and "identity"
            are supported. Default "tanh".
        gate_activation(str, optional): The activation function corresponding to
            :math:`act_g` in the formula. "sigmoid", "tanh", "relu" and "identity"
            are supported. Default "sigmoid".

    Returns:
        tuple: The tuple contains three Tensor variables with the same data type \
            as ``input`` . They represent the hidden state for next time step ( :math:`h_t` ), \
            reset previous hidden state ( :math:`r_t \odot h_{t-1}` ), and the \
            concatenation of :math:`h_t, r_t, \\tilde{h_t}` . And they have shape \
            :math:`[N, D]` , :math:`[N, D]` , :math:`[N, D \times 3]` separately. \
            Usually only the hidden state for next time step ( :math:`h_t` ) is used \
            as output and state, the other two are intermediate results of calculations.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            dict_dim, emb_dim = 128, 64
            data = fluid.data(name='step_data', shape=[None], dtype='int64')
            emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])
            hidden_dim = 512
            x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
            pre_hidden = fluid.data(
                name='pre_hidden', shape=[None, hidden_dim], dtype='float32')
            hidden, reset_hidden, gate = fluid.layers.gru_unit(
                input=x, hidden=pre_hidden, size=hidden_dim * 3)
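
            # Illustrative sketch: unrolling two steps by hand. step1 and step2 are
            # hypothetical per-step inputs already projected to 3 * hidden_dim; note
            # that each gru_unit call creates its own parameters unless they are
            # shared through param_attr.
            step1 = fluid.data(name='step1', shape=[None, hidden_dim * 3], dtype='float32')
            step2 = fluid.data(name='step2', shape=[None, hidden_dim * 3], dtype='float32')
            h1, _, _ = fluid.layers.gru_unit(input=step1, hidden=pre_hidden, size=hidden_dim * 3)
            h2, _, _ = fluid.layers.gru_unit(input=step2, hidden=h1, size=hidden_dim * 3)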

    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'gru_unit')
    check_variable_and_dtype(
        hidden, 'hidden', ['float32', 'float64'], 'gru_unit'
    )
    check_type(size, 'size', (int), 'gru_unit')
    activation_dict = dict(
        identity=0,
        sigmoid=1,
        tanh=2,
        relu=3,
    )
    activation = activation_dict[activation]
    gate_activation = activation_dict[gate_activation]

    helper = LayerHelper('gru_unit', **locals())
    dtype = helper.input_dtype()
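    # `size` is expected to be 3 * hidden_size (update, reset and candidate slices);
    # recover the per-gate hidden size before creating parameters.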
    size = size // 3

    # create weight
    weight = helper.create_parameter(
        attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype
    )

    gate = helper.create_variable_for_type_inference(dtype)
    reset_hidden_pre = helper.create_variable_for_type_inference(dtype)
    updated_hidden = helper.create_variable_for_type_inference(dtype)
    inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': weight}
    # create bias
    if helper.bias_attr:
        bias_size = [1, 3 * size]
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True
        )
        inputs['Bias'] = bias

    helper.append_op(
        type='gru_unit',
        inputs=inputs,
        outputs={
            'Gate': gate,
            'ResetHiddenPrev': reset_hidden_pre,
            'Hidden': updated_hidden,
        },
        attrs={
            'activation': activation,
            'gate_activation': gate_activation,
            'origin_mode': origin_mode,
        },
    )

    return updated_hidden, reset_hidden_pre, gate


def lstm_unit(
    x_t,
    hidden_t_prev,
    cell_t_prev,
    forget_bias=0.0,
    param_attr=None,
    bias_attr=None,
    name=None,
):
    r"""
	:api_attr: Static Graph

    Long-Short Term Memory (LSTM) RNN cell. This operator performs LSTM calculations for
    one time step, whose implementation is based on calculations described in `RECURRENT
    NEURAL NETWORK REGULARIZATION <http://arxiv.org/abs/1409.2329>`_  .

    We add forget_bias to the biases of the forget gate in order to
    reduce the scale of forgetting. The formula is as follows:

    .. math::

        i_{t} & = \sigma(W_{x_{i}}x_{t} + W_{h_{i}}h_{t-1} + b_{i})

        f_{t} & = \sigma(W_{x_{f}}x_{t} + W_{h_{f}}h_{t-1} + b_{f} + forget\\_bias)

        c_{t} & = f_{t}c_{t-1} + i_{t} tanh (W_{x_{c}}x_{t} + W_{h_{c}}h_{t-1} + b_{c})

        o_{t} & = \sigma(W_{x_{o}}x_{t} + W_{h_{o}}h_{t-1} + b_{o})

        h_{t} & = o_{t} tanh (c_{t})

    :math:`x_{t}` stands for ``x_t`` , corresponding to the input of current time step;
    :math:`h_{t-1}` and :math:`c_{t-1}` correspond to ``hidden_t_prev`` and ``cell_t_prev`` ,
    representing the outputs from the previous time step.
    :math:`i_{t}, f_{t}, c_{t}, o_{t}, h_{t}` are the input gate, forget gate, cell state, output gate
    and hidden state, respectively.

    Args:
        x_t(Variable): A 2D Tensor representing the input of current time step.
            Its shape should be :math:`[N, M]` , where :math:`N` stands for batch
            size, :math:`M` for the feature size of input. The data type should
            be float32 or float64.
        hidden_t_prev(Variable): A 2D Tensor representing the hidden value from
            previous step. Its shape should be :math:`[N, D]` , where :math:`N`
            stands for batch size, :math:`D` for the hidden size. The data type
            should be same as ``x_t`` .
        cell_t_prev(Variable): A 2D Tensor representing the cell value from
            previous step. It has the same shape and data type with ``hidden_t_prev`` .
        forget_bias (float, optional): :math:`forget\\_bias` added to the biases
            of the forget gate. Default 0.
        param_attr(ParamAttr, optional):  To specify the weight parameter property.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr, optional): To specify the bias parameter property.
            Default: None, which means the default bias parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        tuple: The tuple contains two Tensor variables with the same shape and \
            data type with ``hidden_t_prev`` , representing the hidden value and \
            cell value which correspond to :math:`h_{t}` and :math:`c_{t}` in \
            the formula.

    Raises:
        ValueError: Rank of x_t must be 2.
        ValueError: Rank of hidden_t_prev must be 2.
        ValueError: Rank of cell_t_prev must be 2.
        ValueError: The 1st dimensions of x_t, hidden_t_prev and cell_t_prev must be the same.
        ValueError: The 2nd dimensions of hidden_t_prev and cell_t_prev must be the same.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            dict_dim, emb_dim, hidden_dim = 128, 64, 512
            data = fluid.data(name='step_data', shape=[None], dtype='int64')
            x = fluid.embedding(input=data, size=[dict_dim, emb_dim])
            pre_hidden = fluid.data(
                name='pre_hidden', shape=[None, hidden_dim], dtype='float32')
            pre_cell = fluid.data(
                name='pre_cell', shape=[None, hidden_dim], dtype='float32')
            hidden, cell = fluid.layers.lstm_unit(
                x_t=x,
                hidden_t_prev=pre_hidden,
                cell_t_prev=pre_cell)
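
            # Illustrative sketch: the returned pair is fed back to compute the next
            # step; x is reused here purely for illustration, and a new set of gate
            # parameters is created unless param_attr is shared between the calls.
            hidden_next, cell_next = fluid.layers.lstm_unit(
                x_t=x,
                hidden_t_prev=hidden,
                cell_t_prev=cell)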
    """
    helper = LayerHelper('lstm_unit', **locals())
    check_variable_and_dtype(x_t, 'x_t', ['float32', 'float64'], 'lstm_unit')
    check_variable_and_dtype(
        hidden_t_prev, 'hidden_t_prev', ['float32', 'float64'], 'lstm_unit'
    )
    check_variable_and_dtype(
        cell_t_prev, 'cell_t_prev', ['float32', 'float64'], 'lstm_unit'
    )
    if len(x_t.shape) != 2:
        raise ValueError("Rank of x_t must be 2.")

    if len(hidden_t_prev.shape) != 2:
        raise ValueError("Rank of hidden_t_prev must be 2.")

    if len(cell_t_prev.shape) != 2:
        raise ValueError("Rank of cell_t_prev must be 2.")

    if (
        x_t.shape[0] != hidden_t_prev.shape[0]
        or x_t.shape[0] != cell_t_prev.shape[0]
    ):
        raise ValueError(
            "The 1st dimensions of x_t, hidden_t_prev and "
            "cell_t_prev must be the same."
        )

    if hidden_t_prev.shape[1] != cell_t_prev.shape[1]:
        raise ValueError(
            "The 2nd dimensions of hidden_t_prev and "
            "cell_t_prev must be the same."
        )

    if bias_attr is None:
        bias_attr = ParamAttr()

    size = cell_t_prev.shape[1]
    concat_out = nn.concat(input=[x_t, hidden_t_prev], axis=1)
    fc_out = nn.fc(
        input=concat_out,
        size=4 * size,
        param_attr=param_attr,
        bias_attr=bias_attr,
    )
    dtype = x_t.dtype
    c = helper.create_variable_for_type_inference(dtype)
    h = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type='lstm_unit',
        inputs={"X": fc_out, "C_prev": cell_t_prev},
        outputs={"C": c, "H": h},
        attrs={"forget_bias": forget_bias},
    )

    return h, c