#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from collections.abc import Sequence
from functools import partial, reduce

import numpy as np

import paddle
from paddle import _C_ops, _legacy_C_ops, framework, in_dynamic_mode
from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
from paddle.fluid.framework import _non_static_mode, in_dygraph_mode
from paddle.fluid.layers import control_flow, sequence_lod, utils
from paddle.fluid.layers.utils import flatten, map_structure
from paddle.framework import core
from paddle.nn import Layer
from paddle.nn import functional as F
from paddle.nn import initializer as I
from paddle.static import Variable, default_startup_program, program_guard
from paddle.tensor.manipulation import tensor_array_to_tensor

from .container import LayerList

__all__ = []


def rnn(
    cell,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    is_reverse=False,
    **kwargs
):
    r"""
    rnn creates a recurrent neural network specified by RNNCell `cell`,
    which performs :code:`cell.call()` (for dygraph mode :code:`cell.forward`)
    repeatedly until it reaches the maximum length of `inputs`.

    Parameters:
        cell(RNNCellBase): An instance of `RNNCellBase`.
        inputs(Tensor): the input sequences.
            If time_major is True, the shape is
            `[time_steps, batch_size, input_size]`
            else the shape is `[batch_size, time_steps, input_size]`.
        initial_states(Tensor|tuple|list, optional): the initial state of the
            rnn cell. Tensor or a possibly nested structure of tensors. If not
            provided, `cell.get_initial_states` would be called to produce
            the initial state. Defaults to None.
        sequence_length (Tensor, optional): shape `[batch_size]`, dtype: int64
            or int32. The valid lengths of input sequences. Defaults to None.
            If `sequence_length` is not None, the inputs are treated as
            padded sequences. In each input sequence, elements whose time step
            index are not less than the valid length are treated as paddings.
        time_major (bool, optional): Whether the first dimension of the input means the
            time steps. Defaults to False.
        is_reverse (bool, optional): Indicate whether to calculate in the reverse
            order of input sequences. Defaults to False.
        **kwargs: Additional keyword arguments to pass to `forward` of the cell.

    Returns:
        outputs (Tensor|list|tuple): the output sequence. Tensor or nested
            structure of Tensors.
            If `time_major` is True, the shape of each tensor in outputs is
            `[time_steps, batch_size, hidden_size]`, else
            `[batch_size, time_steps, hidden_size]`.
        final_states (Tensor|list|tuple): final states. A (possibly nested structure of)
            tensor[s], representing the final state for RNN. It has the same
            structure as the initial states. Each tensor in final states has the same
            shape and dtype as the corresponding tensor in initial states.

    Examples:

        .. code-block:: python

            import paddle
            paddle.disable_static()

            cell = paddle.nn.SimpleRNNCell(16, 32)

            inputs = paddle.rand((4, 23, 16))
            prev_h = paddle.randn((4, 32))
            outputs, final_states = paddle.nn.layer.rnn(cell, inputs, prev_h)
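
            # a sketch of handling padded batches (the lengths below are
            # illustrative); sequence_length keeps the last valid state of
            # each sequence and treats later steps as padding
            lens = paddle.to_tensor([23, 20, 15, 10], dtype="int64")
            outputs, final_states = paddle.nn.layer.rnn(
                cell, inputs, prev_h, sequence_length=lens)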

    """

    if _non_static_mode():
        return _rnn_dynamic_graph(
            cell,
            inputs,
            initial_states,
            sequence_length,
            time_major,
            is_reverse,
            **kwargs
        )
    else:
        return _rnn_static_graph(
            cell,
            inputs,
            initial_states,
            sequence_length,
            time_major,
            is_reverse,
            **kwargs
        )


class ArrayWrapper:
    def __init__(self, x):
        self.array = [x]

    def append(self, x):
        self.array.append(x)
        return self

    def __getitem__(self, item):
        return self.array.__getitem__(item)


def _maybe_copy(state, new_state, step_mask):
    """update rnn state or just pass the old state through"""
    new_state = paddle.tensor.math._multiply_with_axis(
        new_state, step_mask, axis=0
    ) + paddle.tensor.math._multiply_with_axis(state, (1 - step_mask), axis=0)
    return new_state


def _transpose_batch_time(x):
    perm = [1, 0] + list(range(2, len(x.shape)))
    return paddle.transpose(x, perm)


def _rnn_dynamic_graph(
    cell,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    is_reverse=False,
    **kwargs
):
    time_step_index = 0 if time_major else 1
    flat_inputs = flatten(inputs)
    time_steps = flat_inputs[0].shape[time_step_index]

    if initial_states is None:
        initial_states = cell.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )

    if not time_major:
        inputs = map_structure(_transpose_batch_time, inputs)

    if sequence_length is not None:
        mask = sequence_lod.sequence_mask(
            sequence_length, maxlen=time_steps, dtype=inputs.dtype
        )
        mask = paddle.transpose(mask, [1, 0])

    if is_reverse:
        inputs = map_structure(lambda x: paddle.reverse(x, axis=[0]), inputs)
        mask = (
            paddle.reverse(mask, axis=[0])
            if sequence_length is not None
            else None
        )

    states = initial_states
    outputs = []
    for i in range(time_steps):
        step_inputs = map_structure(lambda x: x[i], inputs)
        step_outputs, new_states = cell(step_inputs, states, **kwargs)
        if sequence_length is not None:
            new_states = map_structure(
                partial(_maybe_copy, step_mask=mask[i]), states, new_states
            )
        states = new_states
        outputs = (
            map_structure(lambda x: ArrayWrapper(x), step_outputs)
            if i == 0
            else map_structure(
                lambda x, x_array: x_array.append(x), step_outputs, outputs
            )
        )

    final_outputs = map_structure(
        lambda x: paddle.stack(x.array, axis=time_step_index), outputs
    )

    if is_reverse:
        final_outputs = map_structure(
            lambda x: paddle.reverse(x, axis=time_step_index), final_outputs
        )

    final_states = new_states
    return final_outputs, final_states


def _rnn_static_graph(
    cell,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    is_reverse=False,
    **kwargs
):
    check_type(inputs, 'inputs', (Variable, list, tuple), 'rnn')
    if isinstance(inputs, (list, tuple)):
        for i, input_x in enumerate(inputs):
            check_variable_and_dtype(
                input_x, 'inputs[' + str(i) + ']', ['float32', 'float64'], 'rnn'
            )
    check_type(
        initial_states,
        'initial_states',
        (Variable, list, tuple, type(None)),
        'rnn',
    )

    check_type(
        sequence_length, 'sequence_length', (Variable, type(None)), 'rnn'
    )

    def _switch_grad(x, stop=False):
        x.stop_gradient = stop
        return x

    if initial_states is None:
        initial_states = cell.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )
    initial_states = map_structure(_switch_grad, initial_states)

    if not time_major:
        inputs = map_structure(_transpose_batch_time, inputs)

    max_seq_len = paddle.shape(flatten(inputs)[0])[0]
    if sequence_length:
        mask = sequence_lod.sequence_mask(
            sequence_length,
            maxlen=max_seq_len,
            dtype=flatten(initial_states)[0].dtype,
        )
        mask = paddle.transpose(mask, [1, 0])
    if is_reverse:
        inputs = map_structure(lambda x: paddle.reverse(x, axis=[0]), inputs)
        mask = paddle.reverse(mask, axis=[0]) if sequence_length else None

    with paddle.fluid.framework.device_guard("cpu"):
        start_i = paddle.zeros([1], dtype="int64")
        end = max_seq_len

        end = paddle.cast(end, "int64")
        cond = start_i < end
    while_op = control_flow.While(cond)
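    # the loop below runs the cell one step at a time; step outputs and states
    # are written into tensor arrays and stacked back into tensors afterwards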

    out_array = paddle.tensor.create_array(dtype=flatten(inputs)[0].dtype)

    init_array = map_structure(
        lambda x: paddle.tensor.create_array(dtype=x.dtype), initial_states
    )

    map_structure(
        lambda x, y: paddle.tensor.array_write(x, start_i, y),
        initial_states,
        init_array,
    )

    with while_op.block():

        step_in = inputs[start_i]
        # step_in = paddle.fluid.layers.Print( step_in, message="step in")
        pre_state = map_structure(
            lambda x: paddle.tensor.array_read(x, start_i), init_array
        )
        # pre_state = paddle.fluid.layers.Print( pre_state, message="pre")
        outputs, new_states = cell(step_in, pre_state, **kwargs)
        assert isinstance(outputs, paddle.fluid.framework.Variable)
        utils.assert_same_structure(new_states, pre_state)
        if sequence_length:
            step_mask = paddle.unsqueeze(mask[start_i], 1)
            # paddle.fluid.layers.Print( step_mask, message="mask")
            # new_states = map_structure(
            #     partial(_maybe_copy, step_mask=step_mask),
            #     pre_state, new_states
            # )
            new_states = map_structure(
                lambda x, y: (x * step_mask + y * (1.0 - step_mask)),
                new_states,
                pre_state,
            )

        paddle.tensor.array_write(outputs, start_i, out_array)

        with paddle.fluid.framework.device_guard("cpu"):

            start_i = paddle.tensor.increment(x=start_i, value=1)
        map_structure(
            lambda x, y: paddle.tensor.array_write(x, start_i, y),
            new_states,
            init_array,
        )

        with paddle.fluid.framework.device_guard("cpu"):
            new_cond = paddle.tensor.less_than(start_i, end)
            paddle.fluid.layers.assign(new_cond, cond)

    out, _ = tensor_array_to_tensor(out_array, axis=0, use_stack=True)

    all_state = map_structure(
        lambda x: tensor_array_to_tensor(x, axis=0, use_stack=True)[0],
        init_array,
    )
    final_outputs = out
    final_states = map_structure(lambda x: x[-1], all_state)

    if is_reverse:
        final_outputs = map_structure(
            lambda x: paddle.reverse(x, axis=[0]), final_outputs
        )

    if not time_major:
        final_outputs = map_structure(_transpose_batch_time, final_outputs)

    return (final_outputs, final_states)


def birnn(
    cell_fw,
    cell_bw,
    inputs,
    initial_states=None,
    sequence_length=None,
    time_major=False,
    **kwargs
):
    r"""
    birnn creates a bidirectional recurrent neural network specified by
    RNNCell `cell_fw` and `cell_bw`, which performs :code:`cell.call()`
    (for dygraph mode :code:`cell.forward`) repeatedly until it reaches the
    maximum length of `inputs`, and then concatenates the outputs of both RNNs
    along the last axis.

    Parameters:
        cell_fw(RNNCellBase): An instance of `RNNCellBase`.
        cell_bw(RNNCellBase): An instance of `RNNCellBase`.
        inputs(Tensor): the input sequences.
            If time_major is True, the shape is
            `[time_steps, batch_size, input_size]`
            else the shape is `[batch_size, time_steps, input_size]`.
        initial_states(tuple, optional): A tuple of initial states of
            `cell_fw` and `cell_bw`.
            If not provided, `cell.get_initial_states` would be called to
            produce initial state for each cell. Defaults to None.
        sequence_length (Tensor, optional): shape `[batch_size]`, dtype: int64
            or int32. The valid lengths of input sequences. Defaults to None.
            If `sequence_length` is not None, the inputs are treated as
            padded sequences. In each input sequence, elements whose time step
            index are not less than the valid length are treated as paddings.
        time_major (bool): Whether the first dimension of the input means the
            time steps. Defaults to False.
        **kwargs: Additional keyword arguments to pass to `forward` of each cell.

    Returns:
        outputs (Tensor): the outputs of the bidirectional RNN. It is the
            concatenation of the outputs from the forward RNN and backward
            RNN along the last axis.
            If time major is True, the shape is `[time_steps, batch_size, size]`,
            else the shape is `[batch_size, time_steps, size]`, where size is
            `cell_fw.hidden_size + cell_bw.hidden_size`.
        final_states (tuple): A tuple of the final states of the forward
            cell and backward cell.

    Examples:

        .. code-block:: python

            import paddle
            paddle.disable_static()

            cell_fw = paddle.nn.LSTMCell(16, 32)
            cell_bw = paddle.nn.LSTMCell(16, 32)

            inputs = paddle.rand((4, 23, 16))
            hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32))
            hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32))
            initial_states = ((hf, cf), (hb, cb))
            outputs, final_states = paddle.nn.layer.birnn(
                cell_fw, cell_bw, inputs, initial_states)

    """

    if initial_states is None:
        states_fw = cell_fw.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )
        states_bw = cell_bw.get_initial_states(
            batch_ref=inputs, batch_dim_idx=1 if time_major else 0
        )
    else:
        states_fw, states_bw = initial_states
    outputs_fw, states_fw = rnn(
        cell_fw,
        inputs,
        states_fw,
        sequence_length,
        time_major=time_major,
        **kwargs
    )

    outputs_bw, states_bw = rnn(
        cell_bw,
        inputs,
        states_bw,
        sequence_length,
        time_major=time_major,
        is_reverse=True,
        **kwargs
    )

    outputs = map_structure(
        lambda x, y: paddle.concat([x, y], -1), outputs_fw, outputs_bw
    )

    final_states = (states_fw, states_bw)
    return outputs, final_states


def split_states(states, bidirectional=False, state_components=1):
    r"""
    Split states of RNN network into possibly nested list or tuple of
    states of each RNN cell of the RNN network.

    Parameters:
        states (Tensor|tuple|list): the concatenated states for RNN network.
            When `state_components` is 1, states is a Tensor with shape
            `(L*D, N, C)` where `L` is the number of layers of the RNN
            network, `D` is the number of directions of the RNN network (1
            for unidirectional RNNs and 2 for bidirectional RNNs), `N` is
            the batch size of the input to the RNN network, `C` is the
            hidden size of the RNN network.

            When `state_components` is larger than 1, `states` is a tuple of
            `state_components` Tensors that meet the requirements described
            above.

            For SimpleRNNs and GRUs, `state_components` is 1, and for LSTMs,
            `state_components` is 2.
        bidirectional (bool): whether the state is of a bidirectional RNN
            network. Defaults to False.
        state_components (int): the number of the components of the states. See
            `states` above. Defaults to 1.

    Returns:
        A nested list or tuple of RNN cell states.
        If `bidirectional` is True, it can be indexed twice to get an RNN
        cell state. The first index indicates the layer, the second index
        indicates the direction.
        If `bidirectional` is False, it can be indexed once to get an RNN
        cell state. The index indicates the layer.
        Note that if `state_components` is larger than 1, an RNN cell state
        can be indexed one more time to get a tensor of shape `(N, C)`, where
        `N` is the batch size of the input to the RNN cell, and `C` is the
        hidden size of the RNN cell.
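
    Examples:

        A minimal usage sketch (the shapes below are illustrative, and the
        import path is an assumption that may differ across Paddle versions).
        It splits the stacked states of a 2-layer bidirectional network with
        a single state component per cell:

        .. code-block:: python

            import paddle
            from paddle.nn.layer.rnn import split_states

            # (L*D, N, C) with L=2 layers, D=2 directions, N=8, C=32
            states = paddle.randn((4, 8, 32))
            per_cell = split_states(states, bidirectional=True)
            # per_cell[layer][direction] is a tensor of shape (N, C)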
    """
    if state_components == 1:
        states = paddle.unstack(states)
        if not bidirectional:
            return states
        else:
            return list(zip(states[::2], states[1::2]))
    else:
        assert len(states) == state_components
        states = tuple([paddle.unstack(item) for item in states])
        if not bidirectional:
            return list(zip(*states))
        else:
            states = list(zip(*states))
            return list(zip(states[::2], states[1::2]))


def concat_states(states, bidirectional=False, state_components=1):
    r"""
    Concatenate a possibly nested list or tuple of RNN cell states into a
    compact form.

    Parameters:
        states (list|tuple): a possibly nested list or tuple of RNN cell
            states.
            If `bidirectional` is True, it can be indexed twice to get an
            RNN cell state. The first index indicates the layer, the second
            index indicates the direction.
            If `bidirectional` is False, it can be indexed once to get an RNN
            cell state. The index indicates the layer.
            Note that if `state_components` is larger than 1, an RNN cell
            state can be indexed one more time to get a tensor of shape `(N, C)`,
            where `N` is the batch size of the input to the RNN cell, and
            `C` is the hidden size of the RNN cell.
        bidirectional (bool): whether the state is of a bidirectional RNN
            network. Defaults to False.
        state_components (int): the number of the components of the states. See
            `states` above. Defaults to 1.

    Returns:
        Concatenated states for RNN network.
        When `state_components` is 1, the result is a Tensor with shape
        `(L\*D, N, C)` where `L` is the number of layers of the RNN
        network, `D` is the number of directions of the RNN network (1 for
        unidirectional RNNs and 2 for bidirectional RNNs), `N` is the batch
        size of the input to the RNN network, `C` is the hidden size of the
        RNN network.
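
    Examples:

        A minimal sketch (the shapes below are illustrative, and the import
        path is an assumption that may differ across Paddle versions). It
        packs the per-cell states of a 2-layer bidirectional network back
        into the stacked layout accepted by `split_states`:

        .. code-block:: python

            import paddle
            from paddle.nn.layer.rnn import concat_states

            per_cell = [
                (paddle.randn((8, 32)), paddle.randn((8, 32))),  # layer 0: fw, bw
                (paddle.randn((8, 32)), paddle.randn((8, 32))),  # layer 1: fw, bw
            ]
            packed = concat_states(per_cell, bidirectional=True)
            # packed has shape (4, 8, 32), i.e. (L*D, N, C)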

    """
    if state_components == 1:
        return paddle.stack(flatten(states))
    else:
        states = flatten(states)
        components = []
        for i in range(state_components):
            components.append(states[i::state_components])
        return tuple([paddle.stack(item) for item in components])


class RNNCellBase(Layer):
    r"""
    RNNCellBase is the base class for abstraction representing the calculations
    mapping the input and state to the output and new state. It is suitable for
    and mostly used in RNNs.
    """

    def get_initial_states(
        self, batch_ref, shape=None, dtype=None, init_value=0.0, batch_dim_idx=0
    ):
        r"""
        Generate initialized states according to provided shape, data type and
        value.

        Parameters:
            batch_ref (Tensor): A tensor, whose shape would be used to
                determine the batch size, which is used to generate initial
                states. For `batch_ref`'s shape d, `d[batch_dim_idx]` is
                treated as batch size.
            shape (list|tuple, optional): A (possibly nested structure of) shape[s],
                where a shape is a list/tuple of integer. `-1` (for batch size)
                will be automatically prepended if a shape does not start with
                it. If None, property `state_shape` will be used. Defaults to
                None.
            dtype (str|list|tuple, optional): A (possibly nested structure of)
                data type[s]. The structure must be same as that of `shape`,
                except when all tensors in states have the same data type, a
                single data type can be used. If None and property `cell.state_shape`
                is not available, current default floating type of paddle is
                used. Defaults to None.
            init_value (float, optional): A float value used to initialize states.
                Defaults to 0.
            batch_dim_idx (int, optional): An integer indicating which
                dimension of `batch_ref` represents batch. Defaults to 0.

        Returns:
            init_states (Tensor|tuple|list): tensor of the provided shape and
                dtype, or list of tensors that each satisfies the requirements,
                packed in the same structure as `shape` and `type` does.
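
        Examples:

            A minimal sketch (the cell and shapes below are chosen for
            illustration); the batch size is taken from `batch_ref`:

            .. code-block:: python

                import paddle

                cell = paddle.nn.LSTMCell(16, 32)
                x = paddle.randn((4, 23, 16))
                h0, c0 = cell.get_initial_states(batch_ref=x, batch_dim_idx=0)
                # h0 and c0 both have shape [4, 32], filled with `init_value` (0.0)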
        """
        # TODO: use inputs and batch_size
        batch_ref = flatten(batch_ref)[0]

        def _is_shape_sequence(seq):
            """For shape, a list/tuple of integers is the finest-grained object"""
            if isinstance(seq, list) or isinstance(seq, tuple):
                if reduce(
                    lambda flag, x: isinstance(x, int) and flag, seq, True
                ):
                    return False
            # TODO: Add check for the illegal
            if isinstance(seq, dict):
                return True
            return isinstance(seq, Sequence) and not isinstance(seq, str)

        class Shape:
            def __init__(self, shape):
                self.shape = shape if shape[0] == -1 else ([-1] + list(shape))

        # nested structure of shapes
        states_shapes = self.state_shape if shape is None else shape
        is_sequence_ori = utils.is_sequence
        utils.is_sequence = _is_shape_sequence
        states_shapes = map_structure(lambda shape: Shape(shape), states_shapes)
        utils.is_sequence = is_sequence_ori

        # nested structure of dtypes
        try:
            states_dtypes = self.state_dtype if dtype is None else dtype
        except NotImplementedError:
            states_dtypes = framework.get_default_dtype()
        if len(flatten(states_dtypes)) == 1:
            dtype = flatten(states_dtypes)[0]
            states_dtypes = map_structure(lambda shape: dtype, states_shapes)

        init_states = map_structure(
            lambda shape, dtype: paddle.fluid.layers.fill_constant_batch_size_like(
                input=batch_ref,
                shape=shape.shape,
                dtype=dtype,
                value=init_value,
                input_dim_idx=batch_dim_idx,
            ),
            states_shapes,
            states_dtypes,
        )
        return init_states

    @property
    def state_shape(self):
        r"""
        Abstract method (property).
        Used to initialize states.
        A (possibly nested structure of) shape[s], where a shape is a
        list/tuple of integers (-1 for batch size would be automatically
        inserted into a shape if the shape does not start with it).
        Not necessary to be implemented if states are not initialized by
        `get_initial_states` or the `shape` argument is provided when using
        `get_initial_states`.
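
        Examples:

            A minimal sketch of how a custom cell might implement it (the
            cell below is hypothetical):

            .. code-block:: python

                class MyCell(paddle.nn.RNNCellBase):
                    @property
                    def state_shape(self):
                        # a single state of size `hidden_size` per example;
                        # -1 for the batch dimension is prepended automatically
                        return (self.hidden_size,)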
        """
        raise NotImplementedError(
            "Please add implementation for `state_shape` in the used cell."
        )

    @property
    def state_dtype(self):
        r"""
        Abstract method (property).
        Used to initialize states.
        A (possibly nested structure of) data type[s]. The structure must be
        same as that of `shape`, except when all tensors in states have the same
        data type, a single data type can be used.
        Not necessary to be implemented if states are not initialized
        by `get_initial_states` or the `dtype` argument is provided when using
        `get_initial_states`.
        """
        raise NotImplementedError(
            "Please add implementation for `state_dtype` in the used cell."
        )


class SimpleRNNCell(RNNCellBase):
    r"""
    Elman RNN (SimpleRNN) cell. Given the inputs and previous states, it
    computes the outputs and updates states.

    The formula used is as follows:

    .. math::
        h_{t} & = act(W_{ih}x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh})

        y_{t} & = h_{t}

    where :math:`act` is for :attr:`activation`.

    Please refer to `Finding Structure in Time
    <https://crl.ucsd.edu/~elman/Papers/fsit.pdf>`_ for more details.

    Parameters:
        input_size (int): The input size.
        hidden_size (int): The hidden size.
        activation (str, optional): The activation in the SimpleRNN cell.
            It can be `tanh` or `relu`. Defaults to `tanh`.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            :math:`weight_ih`. Default: None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            :math:`weight_hh`. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            :math:`bias_ih`. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            :math:`bias_hh`. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Variables:
        - **weight_ih** (Parameter): shape (hidden_size, input_size), input to hidden weight, corresponding to :math:`W_{ih}` in the formula.
        - **weight_hh** (Parameter): shape (hidden_size, hidden_size), hidden to hidden weight, corresponding to :math:`W_{hh}` in the formula.
        - **bias_ih** (Parameter): shape (hidden_size, ), input to hidden bias, corresponding to :math:`b_{ih}` in the formula.
        - **bias_hh** (Parameter): shape (hidden_size, ), hidden to hidden bias, corresponding to :math:`b_{hh}` in the formula.

    Inputs:
        - **inputs** (Tensor): shape `[batch_size, input_size]`, the input, corresponding to :math:`x_{t}` in the formula.
        - **states** (Tensor, optional): shape `[batch_size, hidden_size]`, the previous hidden state, corresponding to :math:`h_{t-1}` in the formula. When states is None, zero state is used. Defaults to None.

    Returns:
        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.
        - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula.

    Notes:
        All the weights and biases are initialized with `Uniform(-std, std)` by default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((4, 16))
            prev_h = paddle.randn((4, 32))

            cell = paddle.nn.SimpleRNNCell(16, 32)
            y, h = cell(x, prev_h)
            print(y.shape)

            #[4,32]

    """

    def __init__(
        self,
        input_size,
        hidden_size,
        activation="tanh",
        weight_ih_attr=None,
        weight_hh_attr=None,
        bias_ih_attr=None,
        bias_hh_attr=None,
        name=None,
    ):
        super().__init__()
        if hidden_size <= 0:
            raise ValueError(
                "hidden_size of {} must be greater than 0, but now equals to {}".format(
                    self.__class__.__name__, hidden_size
                )
            )
        std = 1.0 / math.sqrt(hidden_size)
        self.weight_ih = self.create_parameter(
            (hidden_size, input_size),
            weight_ih_attr,
            default_initializer=I.Uniform(-std, std),
        )
        self.weight_hh = self.create_parameter(
            (hidden_size, hidden_size),
            weight_hh_attr,
            default_initializer=I.Uniform(-std, std),
        )
        self.bias_ih = self.create_parameter(
            (hidden_size,),
            bias_ih_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std),
        )
        self.bias_hh = self.create_parameter(
            (hidden_size,),
            bias_hh_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std),
        )

        self.input_size = input_size
        self.hidden_size = hidden_size
        if activation not in ["tanh", "relu"]:
            raise ValueError(
                "activation for SimpleRNNCell should be tanh or relu, "
                "but got {}".format(activation)
            )
        self.activation = activation
        self._activation_fn = paddle.tanh if activation == "tanh" else F.relu

    def forward(self, inputs, states=None):
        if states is None:
            states = self.get_initial_states(inputs, self.state_shape)
        pre_h = states
        i2h = paddle.matmul(inputs, self.weight_ih, transpose_y=True)
        if self.bias_ih is not None:
            i2h += self.bias_ih
        h2h = paddle.matmul(pre_h, self.weight_hh, transpose_y=True)
        if self.bias_hh is not None:
            h2h += self.bias_hh
        h = self._activation_fn(i2h + h2h)
        return h, h

    @property
    def state_shape(self):
        return (self.hidden_size,)

    def extra_repr(self):
        s = '{input_size}, {hidden_size}'
        if self.activation != "tanh":
            s += ', activation={activation}'
        return s.format(**self.__dict__)


class LSTMCell(RNNCellBase):
    r"""
    Long-Short Term Memory (LSTM) RNN cell. Given the inputs and previous states,
    it computes the outputs and updates states.

    The formula used is as follows:

    .. math::
        i_{t} & = \sigma(W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})

        f_{t} & = \sigma(W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})

        o_{t} & = \sigma(W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})

        \widetilde{c}_{t} & = \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})

        c_{t} & = f_{t} * c_{t-1} + i_{t} * \widetilde{c}_{t}

        h_{t} & = o_{t} * \tanh(c_{t})

        y_{t} & = h_{t}

    where :math:`\sigma` is the sigmoid function, and * is the elementwise
    multiplication operator.

    Please refer to `An Empirical Exploration of Recurrent Network Architectures
    <http://proceedings.mlr.press/v37/jozefowicz15.pdf>`_ for more details.

    Parameters:
        input_size (int): The input size.
        hidden_size (int): The hidden size.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih`. Default: None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh`. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih`. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh`. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Variables:
        - **weight_ih** (Parameter): shape (4 * hidden_size, input_size), input to hidden weight, which corresponds to the concatenation of :math:`W_{ii}, W_{if}, W_{ig}, W_{io}` in the formula.
        - **weight_hh** (Parameter): shape (4 * hidden_size, hidden_size), hidden to hidden weight, which corresponds to the concatenation of :math:`W_{hi}, W_{hf}, W_{hg}, W_{ho}` in the formula.
        - **bias_ih** (Parameter): shape (4 * hidden_size, ), input to hidden bias, which corresponds to the concatenation of :math:`b_{ii}, b_{if}, b_{ig}, b_{io}` in the formula.
        - **bias_hh** (Parameter): shape (4 * hidden_size, ), hidden to hidden bias, which corresponds to the concatenation of :math:`b_{hi}, b_{hf}, b_{hg}, b_{ho}` in the formula.

    Inputs:
        - **inputs** (Tensor): shape `[batch_size, input_size]`, the input, corresponding to :math:`x_t` in the formula.
        - **states** (list|tuple, optional): a list/tuple of two tensors, each of shape `[batch_size, hidden_size]`, the previous hidden state and cell state, corresponding to :math:`h_{t-1}, c_{t-1}` in the formula. When states is None, zero states are used. Defaults to None.

    Returns:
        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.
        - **states** (tuple): a tuple of two tensors, each of shape `[batch_size, hidden_size]`, the new hidden state and cell state, corresponding to :math:`h_{t}, c_{t}` in the formula.

    Notes:
        All the weights and biases are initialized with `Uniform(-std, std)` by
        default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more
        information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((4, 16))
            prev_h = paddle.randn((4, 32))
            prev_c = paddle.randn((4, 32))

            cell = paddle.nn.LSTMCell(16, 32)
            y, (h, c) = cell(x, (prev_h, prev_c))

            print(y.shape)
            print(h.shape)
            print(c.shape)

            #[4,32]
            #[4,32]
            #[4,32]

    """

    def __init__(
        self,
        input_size,
        hidden_size,
        weight_ih_attr=None,
        weight_hh_attr=None,
        bias_ih_attr=None,
        bias_hh_attr=None,
        name=None,
    ):
        super().__init__()
        if hidden_size <= 0:
            raise ValueError(
                "hidden_size of {} must be greater than 0, but now equals to {}".format(
                    self.__class__.__name__, hidden_size
                )
            )
        std = 1.0 / math.sqrt(hidden_size)
        self.weight_ih = self.create_parameter(
            (4 * hidden_size, input_size),
            weight_ih_attr,
            default_initializer=I.Uniform(-std, std),
        )
        self.weight_hh = self.create_parameter(
            (4 * hidden_size, hidden_size),
            weight_hh_attr,
            default_initializer=I.Uniform(-std, std),
        )
        self.bias_ih = self.create_parameter(
            (4 * hidden_size,),
            bias_ih_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std),
        )
        self.bias_hh = self.create_parameter(
            (4 * hidden_size,),
            bias_hh_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std),
        )

        self.hidden_size = hidden_size
        self.input_size = input_size
        self._gate_activation = F.sigmoid
        self._activation = paddle.tanh

    def forward(self, inputs, states=None):
        if states is None:
            states = self.get_initial_states(inputs, self.state_shape)
        pre_hidden, pre_cell = states
        gates = paddle.matmul(inputs, self.weight_ih, transpose_y=True)
        if self.bias_ih is not None:
            gates = gates + self.bias_ih
        gates += paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True)
        if self.bias_hh is not None:
            gates = gates + self.bias_hh

        chunked_gates = paddle.split(gates, num_or_sections=4, axis=-1)
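        # gate layout follows the weight concatenation: input, forget, cell
        # candidate, output (i, f, g, o)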

        i = self._gate_activation(chunked_gates[0])
        f = self._gate_activation(chunked_gates[1])
        o = self._gate_activation(chunked_gates[3])
        c = f * pre_cell + i * self._activation(chunked_gates[2])
        h = o * self._activation(c)

        return h, (h, c)

    @property
    def state_shape(self):
        r"""
        The `state_shape` of LSTMCell is a tuple with two shapes:
        `((hidden_size, ), (hidden_size,))`. (-1 for batch size would be
        automatically inserted into shape). These two shapes correspond
        to :math:`h_{t-1}` and :math:`c_{t-1}` separately.
        """
        return ((self.hidden_size,), (self.hidden_size,))

    def extra_repr(self):
        return '{input_size}, {hidden_size}'.format(**self.__dict__)


class GRUCell(RNNCellBase):
    r"""
    Gated Recurrent Unit (GRU) RNN cell. Given the inputs and previous states,
    it computes the outputs and updates states.

    The formula for GRU used is as follows:

    .. math::

        r_{t} & = \sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr})

        z_{t} & = \sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz})

        \widetilde{h}_{t} & = \tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc}))

        h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \widetilde{h}_{t}

        y_{t} & = h_{t}

    where :math:`\sigma` is the sigmoid function, and * is the elementwise
    multiplication operator.

    Please refer to `An Empirical Exploration of Recurrent Network Architectures
    <http://proceedings.mlr.press/v37/jozefowicz15.pdf>`_ for more details.

    Parameters:
        input_size (int): The input size.
        hidden_size (int): The hidden size.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih`. Default: None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh`. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih`. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh`. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Variables:
        - **weight_ih** (Parameter): shape (3 * hidden_size, input_size), input to hidden weight, which corresponds to the concatenation of :math:`W_{ir}, W_{iz}, W_{ic}` in the formula.
        - **weight_hh** (Parameter): shape (3 * hidden_size, hidden_size), hidden to hidden weight, which corresponds to the concatenation of :math:`W_{hr}, W_{hz}, W_{hc}` in the formula.
        - **bias_ih** (Parameter): shape (3 * hidden_size, ), input to hidden bias, which corresponds to the concatenation of :math:`b_{ir}, b_{iz}, b_{ic}` in the formula.
        - **bias_hh** (Parameter): shape (3 * hidden_size, ), hidden to hidden bias, which corresponds to the concatenation of :math:`b_{hr}, b_{hz}, b_{hc}` in the formula.

    Inputs:
        - **inputs** (Tensor): A tensor with shape `[batch_size, input_size]`, corresponding to :math:`x_t` in the formula.
        - **states** (Tensor): A tensor with shape `[batch_size, hidden_size]`, corresponding to :math:`h_{t-1}` in the formula.

    Returns:
        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.
        - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula.

    Notes:
        All the weights and biases are initialized with `Uniform(-std, std)` by
        default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more
        information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((4, 16))
            prev_h = paddle.randn((4, 32))

            cell = paddle.nn.GRUCell(16, 32)
            y, h = cell(x, prev_h)

            print(y.shape)
            print(h.shape)

            #[4,32]
            #[4,32]

    """

    def __init__(
        self,
        input_size,
        hidden_size,
        weight_ih_attr=None,
        weight_hh_attr=None,
        bias_ih_attr=None,
        bias_hh_attr=None,
        name=None,
    ):
        super().__init__()
        if hidden_size <= 0:
            raise ValueError(
                "hidden_size of {} must be greater than 0, but now equals to {}".format(
                    self.__class__.__name__, hidden_size
                )
            )
        std = 1.0 / math.sqrt(hidden_size)
        self.weight_ih = self.create_parameter(
            (3 * hidden_size, input_size),
            weight_ih_attr,
            default_initializer=I.Uniform(-std, std),
        )
        self.weight_hh = self.create_parameter(
            (3 * hidden_size, hidden_size),
            weight_hh_attr,
            default_initializer=I.Uniform(-std, std),
        )
        self.bias_ih = self.create_parameter(
            (3 * hidden_size,),
            bias_ih_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std),
        )
        self.bias_hh = self.create_parameter(
            (3 * hidden_size,),
            bias_hh_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std),
        )

        self.hidden_size = hidden_size
        self.input_size = input_size
        self._gate_activation = F.sigmoid
        self._activation = paddle.tanh

    def forward(self, inputs, states=None):
        if states is None:
            states = self.get_initial_states(inputs, self.state_shape)

        pre_hidden = states
        x_gates = paddle.matmul(inputs, self.weight_ih, transpose_y=True)
        if self.bias_ih is not None:
            x_gates = x_gates + self.bias_ih
        h_gates = paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True)
        if self.bias_hh is not None:
            h_gates = h_gates + self.bias_hh

        x_r, x_z, x_c = paddle.split(x_gates, num_or_sections=3, axis=1)
        h_r, h_z, h_c = paddle.split(h_gates, num_or_sections=3, axis=1)

        r = self._gate_activation(x_r + h_r)
        z = self._gate_activation(x_z + h_z)
        c = self._activation(x_c + r * h_c)  # apply reset gate after mm
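        # equivalent to z * pre_hidden + (1 - z) * c, i.e. the update-gate
        # formula in the class docstring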
        h = (pre_hidden - c) * z + c

        return h, h

    @property
    def state_shape(self):
        r"""
        The `state_shape` of GRUCell is a shape `[hidden_size]` (-1 for batch
        size would be automatically inserted into shape). The shape corresponds
        to the shape of :math:`h_{t-1}`.
        """
        return (self.hidden_size,)

    def extra_repr(self):
        return '{input_size}, {hidden_size}'.format(**self.__dict__)


class RNN(Layer):
    r"""
    Wrapper for RNN, which creates a recurrent neural network with an RNN cell.
    It performs :code:`cell.forward()` repeatedly until it reaches the maximum
    length of `inputs`.

    Parameters:
        cell(RNNCellBase): An instance of `RNNCellBase`.
        is_reverse (bool, optional): Indicate whether to calculate in the reverse
            order of input sequences. Defaults to False.
        time_major (bool): Whether the first dimension of the input means the
            time steps. Defaults to False.

    Inputs:
        - **inputs** (Tensor): A (possibly nested structure of) tensor[s]. The input sequences. If time major is False, the shape is `[batch_size, time_steps, input_size]`. If time major is True, the shape is `[time_steps, batch_size, input_size]` where `input_size` is the input size of the cell.
        - **initial_states** (Tensor|list|tuple, optional): Tensor of a possibly nested structure of tensors, representing the initial state for the rnn cell. If not provided, `cell.get_initial_states` would be called to produce the initial states. Defaults to None.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.
        - **kwargs**: Additional keyword arguments to pass to `forward` of the cell.

    Returns:
        - **outputs** (Tensor|list|tuple): the output sequences. If `time_major` is True, the shape is `[time_steps, batch_size, hidden_size]`, else `[batch_size, time_steps, hidden_size]`.
        - **final_states** (Tensor|list|tuple): final states of the cell. Tensor or a possibly nested structure of tensors which has the same structure as the initial states. Each tensor in final states has the same shape and dtype as the corresponding tensor in initial states.

    Notes:
        This class is a low level API for wrapping an RNN cell into an RNN network.
        Users should take care of the state of the cell. If `initial_states` is
        passed to the `forward` method, make sure that it satisfies the
        requirements of the cell.

    Examples:

        .. code-block:: python

            import paddle

            inputs = paddle.rand((4, 23, 16))
            prev_h = paddle.randn((4, 32))

            cell = paddle.nn.SimpleRNNCell(16, 32)
            rnn = paddle.nn.RNN(cell)
            outputs, final_states = rnn(inputs, prev_h)

            print(outputs.shape)
            print(final_states.shape)

            #[4,23,32]
            #[4,32]

    """

    def __init__(self, cell, is_reverse=False, time_major=False):
        super().__init__()
        self.cell = cell
        if not hasattr(self.cell, "call"):
            # for non-dygraph mode, `rnn` api uses cell.call
            self.cell.call = self.cell.forward
        self.is_reverse = is_reverse
        self.time_major = time_major

    def forward(
        self, inputs, initial_states=None, sequence_length=None, **kwargs
    ):
        final_outputs, final_states = rnn(
            self.cell,
            inputs,
            initial_states=initial_states,
            sequence_length=sequence_length,
            time_major=self.time_major,
            is_reverse=self.is_reverse,
            **kwargs
        )
        return final_outputs, final_states


class BiRNN(Layer):
    r"""
    Wrapper for bidirectional RNN, which builds a bidirectional RNN given the
    forward rnn cell and backward rnn cell. A BiRNN applies forward RNN and
    backward RNN with corresponding cells separately and concatenates the outputs
    along the last axis.

    Parameters:
        cell_fw (RNNCellBase): An RNNCellBase instance used for forward RNN.
        cell_bw (RNNCellBase): An RNNCellBase instance used for backward RNN.
        time_major (bool): Whether the first dimension of the input means the
            time steps. Defaults to False.

    Inputs:
        - **inputs** (Tensor): the input sequences of both RNNs. If time_major is True, the shape is `[time_steps, batch_size, input_size]`, else the shape is `[batch_size, time_steps, input_size]`, where input_size is the input size of both cells.
        - **initial_states** (list|tuple, optional): A tuple/list of the initial states of the forward cell and backward cell. If not provided, `cell.get_initial_states` would be called to produce the initial states for each cell. Defaults to None.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.
        - **kwargs**: Additional keyword arguments. Arguments passed to `forward` for each cell.

    Outputs:
        - **outputs** (Tensor): the outputs of the bidirectional RNN. It is the concatenation of the outputs from the forward RNN and backward RNN along the last axis. If time major is True, the shape is `[time_steps, batch_size, size]`, else the shape is `[batch_size, time_steps, size]`, where size is `cell_fw.hidden_size + cell_bw.hidden_size`.
        - **final_states** (tuple): A tuple of the final states of the forward cell and backward cell.

    Notes:
        This class is a low level API for wrapping rnn cells into a BiRNN
        network. Users should take care of the states of the cells.
        If `initial_states` is passed to the `forward` method, make sure that
        it satisfies the requirements of the cells.

    Examples:

        .. code-block:: python

            import paddle

            cell_fw = paddle.nn.LSTMCell(16, 32)
            cell_bw = paddle.nn.LSTMCell(16, 32)
            rnn = paddle.nn.BiRNN(cell_fw, cell_bw)

            inputs = paddle.rand((2, 23, 16))
            outputs, final_states = rnn(inputs)

            print(outputs.shape)
            print(final_states[0][0].shape,len(final_states),len(final_states[0]))

            #[2,23,64]
            #[2,32] 2 2

    """

    def __init__(self, cell_fw, cell_bw, time_major=False):
        super().__init__()
        self.cell_fw = cell_fw
        self.cell_bw = cell_bw
        if cell_fw.input_size != cell_bw.input_size:
            raise ValueError(
                "input size of forward cell({}) does not equals"
                "that of backward cell({})".format(
                    cell_fw.input_size, cell_bw.input_size
                )
            )
        for cell in [self.cell_fw, self.cell_bw]:
            if not hasattr(cell, "call"):
                # for non-dygraph mode, `rnn` api uses cell.call
                cell.call = cell.forward
        self.time_major = time_major

    def forward(
        self, inputs, initial_states=None, sequence_length=None, **kwargs
    ):
        if isinstance(initial_states, (list, tuple)):
            assert (
                len(initial_states) == 2
            ), "length of initial_states should be 2 when it is a list/tuple"

        outputs, final_states = birnn(
            self.cell_fw,
            self.cell_bw,
            inputs,
            initial_states,
            sequence_length,
            self.time_major,
            **kwargs
        )
        return outputs, final_states


class RNNBase(LayerList):
    r"""
    RNNBase class for RNN networks. It provides `forward`, `flatten_parameters`
    and other common methods for SimpleRNN, LSTM and GRU.
    """

    def __init__(
        self,
        mode,
        input_size,
        hidden_size,
        num_layers=1,
        direction="forward",
        time_major=False,
        dropout=0.0,
        weight_ih_attr=None,
        weight_hh_attr=None,
        bias_ih_attr=None,
        bias_hh_attr=None,
    ):
        super().__init__()
        bidirectional_list = ["bidirectional", "bidirect"]
        self.mode = mode
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_directions = 2 if direction in bidirectional_list else 1
        self.time_major = time_major
        self.num_layers = num_layers
        self.state_components = 2 if mode == "LSTM" else 1

        kwargs = {
            "weight_ih_attr": weight_ih_attr,
            "weight_hh_attr": weight_hh_attr,
            "bias_ih_attr": bias_ih_attr,
            "bias_hh_attr": bias_hh_attr,
        }

        if mode == "LSTM":
            rnn_cls = LSTMCell
        elif mode == "GRU":
            rnn_cls = GRUCell
        else:
            rnn_cls = SimpleRNNCell
            kwargs["activation"] = self.activation

        if direction in ["forward"]:
            is_reverse = False
            cell = rnn_cls(input_size, hidden_size, **kwargs)
            self.append(RNN(cell, is_reverse, time_major))
            for i in range(1, num_layers):
                cell = rnn_cls(hidden_size, hidden_size, **kwargs)
                self.append(RNN(cell, is_reverse, time_major))
        elif direction in bidirectional_list:
            cell_fw = rnn_cls(input_size, hidden_size, **kwargs)
            cell_bw = rnn_cls(input_size, hidden_size, **kwargs)
            self.append(BiRNN(cell_fw, cell_bw, time_major))
            for i in range(1, num_layers):
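                # layers after the first receive the concatenation of the
                # forward and backward outputs, hence 2 * hidden_size inputs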
                cell_fw = rnn_cls(2 * hidden_size, hidden_size, **kwargs)
                cell_bw = rnn_cls(2 * hidden_size, hidden_size, **kwargs)
                self.append(BiRNN(cell_fw, cell_bw, time_major))
        else:
            raise ValueError(
                "direction should be forward or bidirect (or bidirectional), "
                "received direction = {}".format(direction)
            )

        self.could_use_cudnn = True
        self.could_use_cudnn &= len(self.parameters()) == num_layers * 4 * (
            2 if direction in bidirectional_list else 1
        )

        # Expose the params as attributes of the RNN layer itself, so the
        # network stays compatible when the rnn composed of small ops is
        # replaced by the cpp rnn kernel.
        # Moreover, `jit.to_static` assumes params are added by the current
        # layer and would not include sublayers' params, so they must also be
        # registered on the current layer for `jit.save`.
        param_names = []
        for layer in range(self.num_layers):
            for direction in range(self.num_directions):
                suffix = '_reverse' if direction == 1 else ''
                param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}'])
                if bias_ih_attr is not False:
                    param_names.append('bias_ih_l{}{}')
                if bias_hh_attr is not False:
                    param_names.append('bias_hh_l{}{}')
                param_names = [x.format(layer, suffix) for x in param_names]
        for name, param in zip(param_names, self.parameters()):
            setattr(self, name, param)

        self.flatten_parameters()

    def flatten_parameters(self):
        """
        Resets the parameter data pointers to addresses in a contiguous memory
        block, for cudnn usage.
        """
        if self.could_use_cudnn:
            # layer.parameters() is depth first and ordered
            # for i in layer: for j in direct: w_ih, w_hh, b_ih, b_hh
            # need to reorganize to cudnn param layout:
            # all bias following all weights
            params = self.parameters(include_sublayers=False)
            shape = [np.prod(param.shape) for param in params]
            self._all_weights = [None] * len(params)
            for i, param in enumerate(params):
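                # params come in (w_ih, w_hh, b_ih, b_hh) order per layer and
                # direction; weights stay at offset 0 while biases are shifted
                # past all the weights (2 weights per layer-direction pair)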
                offset = (
                    0
                    if i % 4 < 2
                    else (2 * self.num_layers * self.num_directions)
                )
                layer_idx = i // 4
                self._all_weights[offset + layer_idx * 2 + i % 2] = param
            # Wrap in a list to avoid it being registered into params and saved;
            # maybe need a better way to handle this later. Use `create_parameter` to
            # add both to main_program and startup_program for static-graph.
            # Use Constant initializer to avoid make effect on random generator.
            self._flat_weight = [
                self.create_parameter(
                    shape=[np.sum(shape)],
                    dtype=params[0].dtype,
                    default_initializer=I.Constant(0.0),
                )
            ]
            # the dropout state could also be hidden to avoid saving it;
            # should the dropout state be persistable for static-graph?
            self._dropout_state = self.create_variable(
                dtype=core.VarDesc.VarType.UINT8
            )
            if in_dynamic_mode():
                with paddle.no_grad():
                    _legacy_C_ops.coalesce_tensor(
                        self._all_weights,
                        self._all_weights,
                        self._flat_weight[0],
                        "copy_data",
                        True,
                        "use_align",
                        False,
                        "dtype",
                        params[0].dtype,
                    )
                    return
            # for static-graph, append coalesce_tensor into startup program
            with program_guard(
                default_startup_program(), default_startup_program()
            ):
                with paddle.no_grad():
                    self._helper.append_op(
                        type="coalesce_tensor",
                        inputs={"Input": self._all_weights},
                        outputs={
                            "Output": self._all_weights,
                            "FusedOutput": self._flat_weight,
                        },
                        attrs={
                            "copy_data": True,
                            "use_align": False,
                            "dtype": params[0].dtype,
                        },
                    )

    def _cudnn_impl(self, inputs, initial_states, sequence_length):
        if not self.time_major:
            inputs = paddle.tensor.transpose(inputs, [1, 0, 2])

        if in_dygraph_mode():
            out, _, state = _C_ops.rnn(
                inputs,
                initial_states,
                self._all_weights,
                sequence_length,
                self._dropout_state,
                self.dropout,
                self.num_directions == 2,
                self.input_size,
                self.hidden_size,
                self.num_layers,
                self.mode,
                0,
                not self.training,
            )
        elif in_dynamic_mode():
            _, _, out, state = _legacy_C_ops.rnn(
                inputs,
                initial_states,
                self._all_weights,
                sequence_length,
                self._dropout_state,
                self.state_components,
                'dropout_prob',
                self.dropout,
                'is_bidirec',
                self.num_directions == 2,
                'input_size',
                self.input_size,
                'hidden_size',
                self.hidden_size,
                'num_layers',
                self.num_layers,
                'mode',
                self.mode,
                'is_test',
                not self.training,
            )
        else:
            out = self._helper.create_variable_for_type_inference(inputs.dtype)
            state = [
                self._helper.create_variable_for_type_inference(inputs.dtype)
                for i in range(self.state_components)
            ]
            reserve = self._helper.create_variable_for_type_inference(
                dtype=core.VarDesc.VarType.UINT8, stop_gradient=True
            )

            inputs = {
                'Input': inputs,
                'WeightList': self._all_weights,
                'PreState': initial_states,
                'SequenceLength': sequence_length,
            }
            attrs = {
                'dropout_prob': self.dropout,
                'is_bidirec': self.num_directions == 2,
                'input_size': self.input_size,
                'hidden_size': self.hidden_size,
                'num_layers': self.num_layers,
                'mode': self.mode,
                'is_test': not self.training,
            }

            outputs = {
                'Out': out,
                'State': state,
                'Reserve': reserve,
                'DropoutState': self._dropout_state,
            }

            self._helper.append_op(
                type="rnn", inputs=inputs, outputs=outputs, attrs=attrs
            )

        out = (
            paddle.tensor.transpose(out, [1, 0, 2])
            if not self.time_major
            else out
        )
        return out, tuple(state) if len(state) > 1 else state[0]

    def forward(self, inputs, initial_states=None, sequence_length=None):
        batch_index = 1 if self.time_major else 0
        dtype = inputs.dtype
        if initial_states is None:
            state_shape = (
                self.num_layers * self.num_directions,
                -1,
                self.hidden_size,
            )
            initial_states = tuple(
                [
                    paddle.fluid.layers.fill_constant_batch_size_like(
                        inputs, state_shape, dtype, 0, batch_index, 1
                    )
                    for _ in range(self.state_components)
                ]
            )
        else:
            initial_states = (
                [initial_states]
                if isinstance(initial_states, paddle.static.Variable)
                else initial_states
            )

        if self.could_use_cudnn and (
            not paddle.device.is_compiled_with_rocm() or sequence_length is None
        ):
            # Add CPU kernel and dispatch in backend later
            return self._cudnn_impl(inputs, initial_states, sequence_length)

        states = split_states(
            initial_states, self.num_directions == 2, self.state_components
        )
        final_states = []

        for i, rnn_layer in enumerate(self):
            if i > 0:
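                # dropout is applied only to the inputs of the second and
                # later layers, as documented for the `dropout` argument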
                inputs = F.dropout(
                    inputs,
                    self.dropout,
                    training=self.training,
                    mode="upscale_in_train",
                )
            outputs, final_state = rnn_layer(inputs, states[i], sequence_length)
            final_states.append(final_state)
            inputs = outputs

        final_states = concat_states(
            final_states, self.num_directions == 2, self.state_components
        )
        return outputs, final_states

    def extra_repr(self):
        main_str = '{input_size}, {hidden_size}'
        if self.num_layers != 1:
            main_str += ', num_layers={num_layers}'
        if self.time_major is not False:
            main_str += ', time_major={time_major}'
        if self.dropout != 0:
            main_str += ', dropout={dropout}'
        return main_str.format(**self.__dict__)


class SimpleRNN(RNNBase):
    r"""
    Multilayer Elman network (SimpleRNN). It takes input sequences and initial
    states as inputs, and returns the output sequences and the final states.

    Each layer inside the SimpleRNN maps the input sequences and initial states
    to the output sequences and final states in the following manner: at each
    step, it takes step inputs(:math:`x_{t}`) and previous
    states(:math:`h_{t-1}`) as inputs, and returns step outputs(:math:`y_{t}`)
    and new states(:math:`h_{t}`).

    .. math::

        h_{t} & = act(W_{ih}x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh})

        y_{t} & = h_{t}

    where :math:`act` is for :attr:`activation`.

    Using keyword arguments to construct is recommended.

    Parameters:
        input_size (int): The input size of :math:`x` for the first layer's cell.
        hidden_size (int): The hidden size of :math:`h` for each layer's cell.
        num_layers (int, optional): Number of recurrent layers. Defaults to 1.
        direction (str, optional): The direction of the network. It can be "forward"
            or "bidirect"(or "bidirectional"). When "bidirect", the way to merge
            outputs of forward and backward is concatenating. Defaults to "forward".
        time_major (bool, optional): Whether the first dimension of the input
            means the time steps. If time_major is True, the shape of Tensor is
            [time_steps, batch_size, input_size], otherwise [batch_size, time_steps, input_size].
            Defaults to False. `time_steps` means the length of input sequence.
        dropout (float, optional): The dropout probability. Dropout is applied
            to the input of each layer except for the first layer. The range of
            dropout is from 0 to 1. Defaults to 0.
        activation (str, optional): The activation in each SimpleRNN cell. It can be
            `tanh` or `relu`. Defaults to `tanh`.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih` of each cell. Defaults to None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh` of each cell. Defaults to None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih` of each cell. Defaults to None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh` of each cell. Defaults to None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Inputs:
        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`. `time_steps` means the length of the input sequence.
        - **initial_states** (Tensor, optional): the initial state. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_state is not given, zero initial states are used.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.

    Returns:

        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, else, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" else 1. `time_steps` means the length of the output sequence.

        - **final_states** (Tensor): final states. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.

    Variables:
        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.
        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.
        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.
        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.

    Examples:

        .. code-block:: python

            import paddle

            rnn = paddle.nn.SimpleRNN(16, 32, 2)

            x = paddle.randn((4, 23, 16))
            prev_h = paddle.randn((2, 4, 32))
            y, h = rnn(x, prev_h)

            print(y.shape)
            print(h.shape)

            #[4,23,32]
            #[2,4,32]
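
            # A minimal sketch of the bidirectional variant: the forward and
            # backward outputs are concatenated along the last axis.
            birnn = paddle.nn.SimpleRNN(16, 32, 2, direction="bidirect")
            prev_h2 = paddle.randn((4, 4, 32))  # num_layers * num_directions = 4
            y2, h2 = birnn(x, prev_h2)
            # expected: y2.shape [4, 23, 64], h2.shape [4, 4, 32]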

    """

    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers=1,
        direction="forward",
        time_major=False,
        dropout=0.0,
        activation="tanh",
        weight_ih_attr=None,
        weight_hh_attr=None,
        bias_ih_attr=None,
        bias_hh_attr=None,
        name=None,
    ):
        if activation == "tanh":
            mode = "RNN_TANH"
        elif activation == "relu":
            mode = "RNN_RELU"
        else:
            raise ValueError("Unknown activation '{}'".format(activation))
        self.activation = activation
        super().__init__(
            mode,
            input_size,
            hidden_size,
            num_layers,
            direction,
            time_major,
            dropout,
            weight_ih_attr,
            weight_hh_attr,
            bias_ih_attr,
            bias_hh_attr,
        )


class LSTM(RNNBase):
    r"""
    Multilayer LSTM. It takes a sequence and an initial state as inputs, and
    returns the output sequences and the final states.

    Each layer inside the LSTM maps the input sequences and initial states
    to the output sequences and final states in the following manner: at each
    step, it takes step inputs(:math:`x_{t}`) and previous
    states(:math:`h_{t-1}, c_{t-1}`) as inputs, and returns step
    outputs(:math:`y_{t}`) and new states(:math:`h_{t}, c_{t}`).

    .. math::

        i_{t} & = \sigma(W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})

        f_{t} & = \sigma(W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})

        o_{t} & = \sigma(W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})

        \widetilde{c}_{t} & = \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})

        c_{t} & = f_{t} * c_{t-1} + i_{t} * \widetilde{c}_{t}

        h_{t} & = o_{t} * \tanh(c_{t})

        y_{t} & = h_{t}

    where :math:`\sigma` is the sigmoid function, and * is the elementwise
    multiplication operator.

    Using keyword arguments to construct is recommended.

    Parameters:
        input_size (int): The input size of :math:`x` for the first layer's cell.
        hidden_size (int): The hidden size of :math:`h` for each layer's cell.
        num_layers (int, optional): Number of recurrent layers. Defaults to 1.
        direction (str, optional): The direction of the network. It can be "forward"
            or "bidirect"(or "bidirectional"). When "bidirect", the way to merge
            outputs of forward and backward is concatenating. Defaults to "forward".
        time_major (bool, optional): Whether the first dimension of the input
            means the time steps. If time_major is True, the shape of Tensor is
            [time_steps, batch_size, input_size], otherwise [batch_size, time_steps, input_size].
            Defaults to False. `time_steps` means the length of input sequence.
        dropout (float, optional): The dropout probability. Dropout is applied
            to the input of each layer except for the first layer. The range of
            dropout is from 0 to 1. Defaults to 0.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih` of each cell. Default: None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh` of each cell. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih` of each cell. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh` of each cell. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Inputs:
        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`. `time_steps` means the length of the input sequence.
        - **initial_states** (list|tuple, optional): the initial state, a list/tuple of (h, c), the shape of each is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_state is not given, zero initial states are used.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index is not less than the valid length are treated as paddings.

    Returns:

        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, If `time_major` is False, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" else 1. `time_steps` means the length of the output sequence.

        - **final_states** (tuple): the final state, a tuple of two tensors, h and c. The shape of each is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.

    Variables:
        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.
        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.
        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.
        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.

    Examples:

        .. code-block:: python

            import paddle

            rnn = paddle.nn.LSTM(16, 32, 2)

            x = paddle.randn((4, 23, 16))
            prev_h = paddle.randn((2, 4, 32))
            prev_c = paddle.randn((2, 4, 32))
            y, (h, c) = rnn(x, (prev_h, prev_c))

            print(y.shape)
            print(h.shape)
            print(c.shape)

            #[4,23,32]
            #[2,4,32]
            #[2,4,32]
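
            # A minimal sketch of handling padded batches with sequence_length;
            # steps at or beyond each valid length are treated as paddings.
            seq_len = paddle.to_tensor([23, 18, 10, 5], dtype='int64')
            y2, (h2, c2) = rnn(x, (prev_h, prev_c), sequence_length=seq_len)
            # expected: y2.shape [4, 23, 32]; h2.shape and c2.shape [2, 4, 32]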

    """

    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers=1,
        direction="forward",
        time_major=False,
        dropout=0.0,
        weight_ih_attr=None,
        weight_hh_attr=None,
        bias_ih_attr=None,
        bias_hh_attr=None,
        name=None,
    ):
        super().__init__(
            "LSTM",
            input_size,
            hidden_size,
            num_layers,
            direction,
            time_major,
            dropout,
            weight_ih_attr,
            weight_hh_attr,
            bias_ih_attr,
            bias_hh_attr,
        )


class GRU(RNNBase):
    r"""
    Multilayer GRU. It takes input sequences and initial states as inputs, and
    returns the output sequences and the final states.

    Each layer inside the GRU maps the input sequences and initial states
    to the output sequences and final states in the following manner: at each
    step, it takes step inputs(:math:`x_{t}`) and previous
    states(:math:`h_{t-1}`) as inputs, and returns step outputs(:math:`y_{t}`)
    and new states(:math:`h_{t}`).

    .. math::

        r_{t} & = \sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr})

        z_{t} & = \sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz})

        \widetilde{h}_{t} & = \tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc}))

        h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \widetilde{h}_{t}

        y_{t} & = h_{t}

    where :math:`\sigma` is the sigmoid function, and * is the elementwise
    multiplication operator.

    Using keyword arguments to construct is recommended.

    Parameters:
        input_size (int): The input size of :math:`x` for the first layer's cell.
        hidden_size (int): The hidden size of :math:`h` for each layer's cell.
        num_layers (int, optional): Number of recurrent layers. Defaults to 1.
        direction (str, optional): The direction of the network. It can be "forward"
            or "bidirect"(or "bidirectional"). When "bidirect", the way to merge
            outputs of forward and backward is concatenating. Defaults to "forward".
        time_major (bool, optional): Whether the first dimension of the input
            means the time steps. If time_major is True, the shape of Tensor is
            [time_steps, batch_size, input_size], otherwise [batch_size, time_steps, input_size].
            Defaults to False. `time_steps` means the length of input sequence.
        dropout (float, optional): The dropout probability. Dropout is applied
            to the input of each layer except for the first layer. The range of
            dropout is from 0 to 1. Defaults to 0.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih` of each cell. Default: None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh` of each cell. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih` of each cell. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh` of each cell. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Inputs:
        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`. `time_steps` means the length of the input sequence.
        - **initial_states** (Tensor, optional): the initial state. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_state is not given, zero initial states are used. Defaults to None.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index is not less than the valid length are treated as paddings.

    Returns:

        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, else, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" else 1. `time_steps` means the length of the output sequence.

        - **final_states** (Tensor): final states. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.

    Variables:
        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.
        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.
        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.
        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.

    Examples:

        .. code-block:: python

            import paddle

            rnn = paddle.nn.GRU(16, 32, 2)

            x = paddle.randn((4, 23, 16))
            prev_h = paddle.randn((2, 4, 32))
            y, h = rnn(x, prev_h)

            print(y.shape)
            print(h.shape)

            #[4,23,32]
            #[2,4,32]
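
            # A minimal sketch of time_major=True: inputs and outputs are laid
            # out as [time_steps, batch_size, ...] instead of batch first.
            rnn_tm = paddle.nn.GRU(16, 32, 2, time_major=True)
            x_tm = paddle.randn((23, 4, 16))
            y_tm, h_tm = rnn_tm(x_tm, prev_h)
            # expected: y_tm.shape [23, 4, 32], h_tm.shape [2, 4, 32]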

    """

    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers=1,
        direction="forward",
        time_major=False,
        dropout=0.0,
        weight_ih_attr=None,
        weight_hh_attr=None,
        bias_ih_attr=None,
        bias_hh_attr=None,
        name=None,
    ):
        super().__init__(
            "GRU",
            input_size,
            hidden_size,
            num_layers,
            direction,
            time_major,
            dropout,
            weight_ih_attr,
            weight_hh_attr,
            bias_ih_attr,
            bias_hh_attr,
        )