#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six
import math
from functools import reduce

import numpy as np
import paddle
from paddle import framework
from paddle.nn import functional as F
from paddle.nn import initializer as I
from paddle.nn import Layer, LayerList
from paddle.fluid.layers import utils
from paddle.fluid.layers.utils import flatten, map_structure
from paddle import _C_ops, _legacy_C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode
from paddle.framework import core
from paddle.static import default_startup_program
from paddle.static import program_guard
try:
    from collections.abc import Sequence
except:
    from collections import Sequence

__all__ = []


def split_states(states, bidirectional=False, state_components=1):
    r"""
    Split states of RNN network into possibly nested list or tuple of
    states of each RNN cells of the RNN network.

    Parameters:
        states (Tensor|tuple|list): the concatenated states for RNN network.
            When `state_components` is 1, states is a Tensor with shape
            `(L*D, N, C)` where `L` is the number of layers of the RNN
            network, `D` is the number of directions of the RNN network(1
            for unidirectional RNNs and 2 for bidirectional RNNs), `N` is
            the batch size of the input to the RNN network, `C` is the
            hidden size of the RNN network.

            When `state_components` is larger than 1, `states` is a tuple of
            `state_components` Tensors that meet the requirements described
            above.

            For SimpleRNNs and GRUs, `state_components` is 1, and for LSTMs,
            `state_components` is 2.
        bidirectional (bool): whether the state is of a bidirectional RNN
            network. Defaults to False.
        state_components (int): the number of the components of the states. see
            `states` above. Defaults to 1.

    Returns:
        A nested list or tuple of RNN cell states.
        If `bidirectional` is True, it can be indexed twice to get an RNN
        cell state. The first index indicates the layer, the second index
        indicates the direction.
        If `bidirectional` is False, it can be indexed once to get an RNN
        cell state. The index indicates the layer.
        Note that if `state_components` is larger than 1, an RNN cell state
        can be indexed one more time to get a tensor of shape(N, C), where
        `N` is the batch size of the input to the RNN cell, and `C` is the
        hidden size of the RNN cell.
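
    Examples:

        .. code-block:: python

            # An illustrative sketch. It assumes this module is importable as
            # `paddle.nn.layer.rnn`; these helpers are not listed in `__all__`.
            import paddle
            from paddle.nn.layer.rnn import split_states

            # packed states of a 2-layer bidirectional RNN, shape (L*D, N, C)
            states = paddle.randn((4, 8, 32))
            per_layer = split_states(states, bidirectional=True)
            # per_layer[0] is the (forward, backward) state pair of layer 0,
            # each tensor with shape [8, 32]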
    """
    if state_components == 1:
        states = paddle.unstack(states)
        if not bidirectional:
            return states
        else:
            return list(zip(states[::2], states[1::2]))
    else:
        assert len(states) == state_components
        states = tuple([paddle.unstack(item) for item in states])
        if not bidirectional:
            return list(zip(*states))
        else:
            states = list(zip(*states))
            return list(zip(states[::2], states[1::2]))


def concat_states(states, bidirectional=False, state_components=1):
    r"""
    Concatenate a possibly nested list or tuple of RNN cell states into a
    compact form.

    Parameters:
        states (list|tuple): a possibly nested list or tuple of RNN cell
            states.
            If `bidirectional` is True, it can be indexed twice to get an
            RNN cell state. The first index indicates the layer, the second
            index indicates the direction.
            If `bidirectional` is False, it can be indexed once to get an RNN
            cell state. The index indicates the layer.
            Note that if `state_components` is larger than 1, an RNN cell
            state can be indexed one more time to get a tensor of shape(N, C),
            where `N` is the batch size of the input to the RNN cell, and
            `C` is the hidden size of the RNN cell.
        bidirectional (bool): whether the state is of a bidirectional RNN
            network. Defaults to False.
        state_components (int): the number of the components of the states. see
            `states` above. Defaults to 1.

    Returns:
        Concatenated states for RNN network.
        When `state_components` is 1, states is a Tensor with shape
        `(L\*D, N, C)` where `L` is the number of layers of the RNN
        network, `D` is the number of directions of the RNN network(1 for
        unidirectional RNNs and 2 for bidirectional RNNs), `N` is the batch
        size of the input to the RNN network, `C` is the hidden size of the
        RNN network.
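
    Examples:

        .. code-block:: python

            # An illustrative sketch (same import assumption as `split_states`):
            # re-pack per-layer cell states into the compact (L*D, N, C) form.
            import paddle
            from paddle.nn.layer.rnn import concat_states, split_states

            states = paddle.randn((4, 8, 32))
            per_layer = split_states(states, bidirectional=True)
            packed = concat_states(per_layer, bidirectional=True)
            # packed has shape [4, 8, 32] again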

    """
    if state_components == 1:
        return paddle.stack(flatten(states))
    else:
        states = flatten(states)
        components = []
        for i in range(state_components):
            components.append(states[i::state_components])
        return tuple([paddle.stack(item) for item in components])


class RNNCellBase(Layer):
    r"""
    RNNCellBase is the base class for abstraction representing the calculations
    mapping the input and state to the output and new state. It is suitable for,
    and mostly used in, RNN.
    """

    def get_initial_states(self,
                           batch_ref,
                           shape=None,
                           dtype=None,
                           init_value=0.,
                           batch_dim_idx=0):
        r"""
        Generate initialized states according to provided shape, data type and
        value.

        Parameters:
            batch_ref (Tensor): A tensor, which shape would be used to
                determine the batch size, which is used to generate initial
                states. For `batch_ref`'s shape d, `d[batch_dim_idx]` is
                treated as batch size.
            shape (list|tuple, optional): A (possibly nested structure of) shape[s],
                where a shape is a list/tuple of integer. `-1` (for batch size)
                will be automatically prepended if a shape does not start with
                it. If None, property `state_shape` will be used. Defaults to
                None.
            dtype (str|list|tuple, optional): A (possibly nested structure of)
                data type[s]. The structure must be same as that of `shape`,
                except when all tensors in the states have the same data type, a
                single data type can be used. If None and property `cell.state_shape`
                is not available, current default floating type of paddle is
                used. Defaults to None.
            init_value (float, optional): A float value used to initialize states.
                Defaults to 0.
            batch_dim_idx (int, optional): An integer indicating which
                dimension of `batch_ref` represents the batch. Defaults to 0.

        Returns:
            init_states (Tensor|tuple|list): tensor of the provided shape and
                dtype, or list of tensors that each satisfies the requirements,
                packed in the same structure as `shape` and `type` does.
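
        Examples:

            .. code-block:: python

                # An illustrative sketch mirroring how the built-in cells call
                # this helper internally (see `SimpleRNNCell.forward`).
                import paddle

                cell = paddle.nn.LSTMCell(16, 32)
                x = paddle.randn((4, 16))
                h0, c0 = cell.get_initial_states(x, cell.state_shape)
                # h0 and c0 both have shape [4, 32] and are filled with 0.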
        """
        # TODO: use inputs and batch_size
        batch_ref = flatten(batch_ref)[0]

        def _is_shape_sequence(seq):
            """For shape, list/tuple of integer is the finest-grained objection"""
            if (isinstance(seq, list) or isinstance(seq, tuple)):
                if reduce(lambda flag, x: isinstance(x, int) and flag, seq,
                          True):
                    return False
            # TODO: Add check for the illegal
            if isinstance(seq, dict):
                return True
            return (isinstance(seq, Sequence)
                    and not isinstance(seq, six.string_types))

        class Shape(object):

            def __init__(self, shape):
                self.shape = shape if shape[0] == -1 else ([-1] + list(shape))

        # nested structure of shapes
        states_shapes = self.state_shape if shape is None else shape
        is_sequence_ori = utils.is_sequence
        utils.is_sequence = _is_shape_sequence
        states_shapes = map_structure(lambda shape: Shape(shape), states_shapes)
        utils.is_sequence = is_sequence_ori

        # nested structure of dtypes
        try:
            states_dtypes = self.state_dtype if dtype is None else dtype
        except NotImplementedError:
            states_dtypes = framework.get_default_dtype()
        if len(flatten(states_dtypes)) == 1:
            dtype = flatten(states_dtypes)[0]
            states_dtypes = map_structure(lambda shape: dtype, states_shapes)

        init_states = map_structure(
            lambda shape, dtype: paddle.fluid.layers.
            fill_constant_batch_size_like(input=batch_ref,
                                          shape=shape.shape,
                                          dtype=dtype,
                                          value=init_value,
                                          input_dim_idx=batch_dim_idx),
            states_shapes, states_dtypes)
        return init_states

    @property
    def state_shape(self):
        r"""
        Abstract method (property).
        Used to initialize states.
        A (possibly nested structure of) shape[s], where a shape is a
        list/tuple of integers (-1 for batch size would be automatically
        inserted into a shape if the shape does not start with it).
        Not necessary to be implemented if states are not initialized by
        `get_initial_states` or the `shape` argument is provided when using
        `get_initial_states`.
        """
        raise NotImplementedError(
            "Please add implementaion for `state_shape` in the used cell.")

    @property
    def state_dtype(self):
        r"""
        Abstract method (property).
        Used to initialize states.
        A (possibly nested structure of) data type[s]. The structure must be the
        same as that of `shape`, except when all tensors in the states have the
        same data type, in which case a single data type can be used.
        Not necessary to be implemented if states are not initialized
        by `get_initial_states` or the `dtype` argument is provided when using
        `get_initial_states`.
        """
        raise NotImplementedError(
            "Please add implementaion for `state_dtype` in the used cell.")


class SimpleRNNCell(RNNCellBase):
    r"""
    Elman RNN (SimpleRNN) cell. Given the inputs and previous states, it
    computes the outputs and updates states.

    The formula used is as follows:

    .. math::
        h_{t} & = act(W_{ih}x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh})

        y_{t} & = h_{t}

    where :math:`act` is for :attr:`activation`.

    Please refer to `Finding Structure in Time
    <https://crl.ucsd.edu/~elman/Papers/fsit.pdf>`_ for more details.

    Parameters:
        input_size (int): The input size.
        hidden_size (int): The hidden size.
        activation (str, optional): The activation in the SimpleRNN cell.
            It can be `tanh` or `relu`. Defaults to `tanh`.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            :math:`weight_ih`. Default: None.
        weight_hh_attr(ParamAttr, optional): The parameter attribute for
            :math:`weight_hh`. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            :math:`bias_ih`. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            :math:`bias_hh`. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Variables:
        - **weight_ih** (Parameter): shape (hidden_size, input_size), input to hidden weight, corresponding to :math:`W_{ih}` in the formula.
        - **weight_hh** (Parameter): shape (hidden_size, hidden_size), hidden to hidden weight, corresponding to :math:`W_{hh}` in the formula.
        - **bias_ih** (Parameter): shape (hidden_size, ), input to hidden bias, corresponding to :math:`b_{ih}` in the formula.
        - **bias_hh** (Parameter): shape (hidden_size, ), hidden to hidden bias, corresponding to :math:`b_{hh}` in the formula.

    Inputs:
        - **inputs** (Tensor): shape `[batch_size, input_size]`, the input, corresponding to :math:`x_{t}` in the formula.
        - **states** (Tensor, optional): shape `[batch_size, hidden_size]`, the previous hidden state, corresponding to :math:`h_{t-1}` in the formula. When states is None, zero state is used. Defaults to None.

    Returns:
        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.
        - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula.

    Notes:
        All the weights and biases are initialized with `Uniform(-std, std)` by default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((4, 16))
            prev_h = paddle.randn((4, 32))

            cell = paddle.nn.SimpleRNNCell(16, 32)
            y, h = cell(x, prev_h)
            print(y.shape)

            #[4,32]

    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 activation="tanh",
                 weight_ih_attr=None,
                 weight_hh_attr=None,
                 bias_ih_attr=None,
                 bias_hh_attr=None,
                 name=None):
        super(SimpleRNNCell, self).__init__()
        if hidden_size <= 0:
            raise ValueError(
                "hidden_size of {} must be greater than 0, but now equals to {}"
                .format(self.__class__.__name__, hidden_size))
        std = 1.0 / math.sqrt(hidden_size)
        self.weight_ih = self.create_parameter(
            (hidden_size, input_size),
            weight_ih_attr,
            default_initializer=I.Uniform(-std, std))
        self.weight_hh = self.create_parameter(
            (hidden_size, hidden_size),
            weight_hh_attr,
            default_initializer=I.Uniform(-std, std))
        self.bias_ih = self.create_parameter(
            (hidden_size, ),
            bias_ih_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std))
        self.bias_hh = self.create_parameter(
            (hidden_size, ),
            bias_hh_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std))

        self.input_size = input_size
        self.hidden_size = hidden_size
        if activation not in ["tanh", "relu"]:
            raise ValueError(
                "activation for SimpleRNNCell should be tanh or relu, "
                "but get {}".format(activation))
        self.activation = activation
        self._activation_fn = paddle.tanh \
            if activation == "tanh" \
            else F.relu

    def forward(self, inputs, states=None):
        if states is None:
            states = self.get_initial_states(inputs, self.state_shape)
        pre_h = states
        i2h = paddle.matmul(inputs, self.weight_ih, transpose_y=True)
        if self.bias_ih is not None:
            i2h += self.bias_ih
        h2h = paddle.matmul(pre_h, self.weight_hh, transpose_y=True)
        if self.bias_hh is not None:
            h2h += self.bias_hh
        h = self._activation_fn(i2h + h2h)
        return h, h

    @property
    def state_shape(self):
        return (self.hidden_size, )

    def extra_repr(self):
        s = '{input_size}, {hidden_size}'
        if self.activation != "tanh":
            s += ', activation={activation}'
        return s.format(**self.__dict__)


class LSTMCell(RNNCellBase):
    r"""
    Long-Short Term Memory(LSTM) RNN cell. Given the inputs and previous states,
    it computes the outputs and updates states.

    The formula used is as follows:

    .. math::
        i_{t} & = \sigma(W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})

        f_{t} & = \sigma(W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})

        o_{t} & = \sigma(W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})

        \widetilde{c}_{t} & = \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})

        c_{t} & = f_{t} * c_{t-1} + i_{t} * \widetilde{c}_{t}

        h_{t} & = o_{t} * \tanh(c_{t})

F

414
    where :math:`\sigma` is the sigmoid fucntion, and * is the elemetwise
F

    Please refer to `An Empirical Exploration of Recurrent Network Architectures
    <http://proceedings.mlr.press/v37/jozefowicz15.pdf>`_ for more details.

    Parameters:
        input_size (int): The input size.
        hidden_size (int): The hidden size.
        weight_ih_attr(ParamAttr, optional): The parameter attribute for
            `weight_ih`. Default: None.
        weight_hh_attr(ParamAttr, optional): The parameter attribute for
            `weight_hh`. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih`. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh`. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Variables:
        - **weight_ih** (Parameter): shape (4 * hidden_size, input_size), input to hidden weight, which corresponds to the concatenation of :math:`W_{ii}, W_{if}, W_{ig}, W_{io}` in the formula.
        - **weight_hh** (Parameter): shape (4 * hidden_size, hidden_size), hidden to hidden weight, which corresponds to the concatenation of :math:`W_{hi}, W_{hf}, W_{hg}, W_{ho}` in the formula.
        - **bias_ih** (Parameter): shape (4 * hidden_size, ), input to hidden bias, which corresponds to the concatenation of :math:`b_{ii}, b_{if}, b_{ig}, b_{io}` in the formula.
        - **bias_hh** (Parameter): shape (4 * hidden_size, ), hidden to hidden bias, which corresponds to the concatenation of :math:`b_{hi}, b_{hf}, b_{hg}, b_{ho}` in the formula.

    Inputs:
        - **inputs** (Tensor): shape `[batch_size, input_size]`, the input, corresponding to :math:`x_t` in the formula.
        - **states** (list|tuple, optional): a list/tuple of two tensors, each of shape `[batch_size, hidden_size]`, the previous hidden state, corresponding to :math:`h_{t-1}, c_{t-1}` in the formula. When states is None, zero state is used. Defaults to None.

    Returns:
        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.
        - **states** (tuple): a tuple of two tensors, each of shape `[batch_size, hidden_size]`, the new hidden states, corresponding to :math:`h_{t}, c_{t}` in the formula.

    Notes:
        All the weights and biases are initialized with `Uniform(-std, std)` by
        default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more
        information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((4, 16))
            prev_h = paddle.randn((4, 32))
            prev_c = paddle.randn((4, 32))

            cell = paddle.nn.LSTMCell(16, 32)
            y, (h, c) = cell(x, (prev_h, prev_c))

            print(y.shape)
            print(h.shape)
            print(c.shape)

            #[4,32]
            #[4,32]
            #[4,32]

    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 weight_ih_attr=None,
                 weight_hh_attr=None,
                 bias_ih_attr=None,
                 bias_hh_attr=None,
                 name=None):
        super(LSTMCell, self).__init__()
        if hidden_size <= 0:
            raise ValueError(
                "hidden_size of {} must be greater than 0, but now equals to {}"
                .format(self.__class__.__name__, hidden_size))
        std = 1.0 / math.sqrt(hidden_size)
        self.weight_ih = self.create_parameter(
            (4 * hidden_size, input_size),
            weight_ih_attr,
            default_initializer=I.Uniform(-std, std))
        self.weight_hh = self.create_parameter(
            (4 * hidden_size, hidden_size),
            weight_hh_attr,
            default_initializer=I.Uniform(-std, std))
        self.bias_ih = self.create_parameter(
            (4 * hidden_size, ),
            bias_ih_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std))
        self.bias_hh = self.create_parameter(
            (4 * hidden_size, ),
            bias_hh_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std))

        self.hidden_size = hidden_size
        self.input_size = input_size
        self._gate_activation = F.sigmoid
        self._activation = paddle.tanh

    def forward(self, inputs, states=None):
        if states is None:
            states = self.get_initial_states(inputs, self.state_shape)
        pre_hidden, pre_cell = states
        gates = paddle.matmul(inputs, self.weight_ih, transpose_y=True)
        if self.bias_ih is not None:
            gates = gates + self.bias_ih
        gates += paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True)
        if self.bias_hh is not None:
            gates = gates + self.bias_hh

        chunked_gates = paddle.split(gates, num_or_sections=4, axis=-1)
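        # packed gate order along the last axis: i, f, g (cell candidate), o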

        i = self._gate_activation(chunked_gates[0])
        f = self._gate_activation(chunked_gates[1])
        o = self._gate_activation(chunked_gates[3])
        c = f * pre_cell + i * self._activation(chunked_gates[2])
        h = o * self._activation(c)

        return h, (h, c)

    @property
    def state_shape(self):
        r"""
        The `state_shape` of LSTMCell is a tuple with two shapes:
        `((hidden_size, ), (hidden_size,))`. (-1 for batch size would be
        automatically inserted into shape). These two shapes correspond
        to :math:`h_{t-1}` and :math:`c_{t-1}` separately.
        """
        return ((self.hidden_size, ), (self.hidden_size, ))

    def extra_repr(self):
        return '{input_size}, {hidden_size}'.format(**self.__dict__)


class GRUCell(RNNCellBase):
    r"""
    Gated Recurrent Unit (GRU) RNN cell. Given the inputs and previous states,
    it computes the outputs and updates states.

    The formula for GRU used is as follows:

    ..  math::

        r_{t} & = \sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr})

        z_{t} & = \sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz})

        \widetilde{h}_{t} & = \tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc}))

        h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \widetilde{h}_{t}

        y_{t} & = h_{t}

    where :math:`\sigma` is the sigmoid function, and * is the elementwise
    multiplication operator.

    Please refer to `An Empirical Exploration of Recurrent Network Architectures
    <http://proceedings.mlr.press/v37/jozefowicz15.pdf>`_ for more details.

    Parameters:
        input_size (int): The input size.
        hidden_size (int): The hidden size.
        weight_ih_attr(ParamAttr, optional): The parameter attribute for
            `weight_ih`. Default: None.
        weight_hh_attr(ParamAttr, optional): The parameter attribute for
            `weight_hh`. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih`. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh`. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Variables:
        - **weight_ih** (Parameter): shape (3 * hidden_size, input_size), input to hidden weight, which corresponds to the concatenation of :math:`W_{ir}, W_{iz}, W_{ic}` in the formula.
        - **weight_hh** (Parameter): shape (3 * hidden_size, hidden_size), hidden to hidden weight, which corresponds to the concatenation of :math:`W_{hr}, W_{hz}, W_{hc}` in the formula.
        - **bias_ih** (Parameter): shape (3 * hidden_size, ), input to hidden bias, which corresponds to the concatenation of :math:`b_{ir}, b_{iz}, b_{ic}` in the formula.
        - **bias_hh** (Parameter): shape (3 * hidden_size, ), hidden to hidden bias, which corresponds to the concatenation of :math:`b_{hr}, b_{hz}, b_{hc}` in the formula.

    Inputs:
        - **inputs** (Tensor): A tensor with shape `[batch_size, input_size]`, corresponding to :math:`x_t` in the formula.
        - **states** (Tensor): A tensor with shape `[batch_size, hidden_size]`, corresponding to :math:`h_{t-1}` in the formula.

    Returns:
        - **outputs** (Tensor): shape `[batch_size, hidden_size]`, the output, corresponding to :math:`h_{t}` in the formula.
        - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula.

    Notes:
        All the weights and biases are initialized with `Uniform(-std, std)` by
        default, where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more
        information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((4, 16))
            prev_h = paddle.randn((4, 32))

            cell = paddle.nn.GRUCell(16, 32)
            y, h = cell(x, prev_h)

            print(y.shape)
            print(h.shape)

            #[4,32]
            #[4,32]

    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 weight_ih_attr=None,
                 weight_hh_attr=None,
                 bias_ih_attr=None,
                 bias_hh_attr=None,
                 name=None):
        super(GRUCell, self).__init__()
        if hidden_size <= 0:
            raise ValueError(
                "hidden_size of {} must be greater than 0, but now equals to {}"
                .format(self.__class__.__name__, hidden_size))
        std = 1.0 / math.sqrt(hidden_size)
        self.weight_ih = self.create_parameter(
            (3 * hidden_size, input_size),
            weight_ih_attr,
            default_initializer=I.Uniform(-std, std))
        self.weight_hh = self.create_parameter(
            (3 * hidden_size, hidden_size),
            weight_hh_attr,
            default_initializer=I.Uniform(-std, std))
        self.bias_ih = self.create_parameter(
            (3 * hidden_size, ),
            bias_ih_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std))
        self.bias_hh = self.create_parameter(
            (3 * hidden_size, ),
            bias_hh_attr,
            is_bias=True,
            default_initializer=I.Uniform(-std, std))

        self.hidden_size = hidden_size
        self.input_size = input_size
        self._gate_activation = F.sigmoid
        self._activation = paddle.tanh

    def forward(self, inputs, states=None):
        if states is None:
            states = self.get_initial_states(inputs, self.state_shape)

        pre_hidden = states
        x_gates = paddle.matmul(inputs, self.weight_ih, transpose_y=True)
        if self.bias_ih is not None:
            x_gates = x_gates + self.bias_ih
        h_gates = paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True)
        if self.bias_hh is not None:
            h_gates = h_gates + self.bias_hh

        x_r, x_z, x_c = paddle.split(x_gates, num_or_sections=3, axis=1)
        h_r, h_z, h_c = paddle.split(h_gates, num_or_sections=3, axis=1)

        r = self._gate_activation(x_r + h_r)
        z = self._gate_activation(x_z + h_z)
        c = self._activation(x_c + r * h_c)  # apply reset gate after mm
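        # the next line is algebraically equal to z * pre_hidden + (1 - z) * c,
        # i.e. the docstring formula for h_t (c plays the role of \widetilde{h}_t)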
        h = (pre_hidden - c) * z + c

        return h, h

    @property
    def state_shape(self):
        r"""
        The `state_shape` of GRUCell is a shape `[hidden_size]` (-1 for batch
        size would be automatically inserted into shape). The shape corresponds
        to the shape of :math:`h_{t-1}`.
        """
        return (self.hidden_size, )

    def extra_repr(self):
        return '{input_size}, {hidden_size}'.format(**self.__dict__)


class RNN(Layer):
    r"""
    Wrapper for RNN, which creates a recurrent neural network with an RNN cell.
    It performs :code:`cell.forward()` repeatedly until it reaches the maximum
    length of `inputs`.

    Parameters:
        cell(RNNCellBase): An instance of `RNNCellBase`.
        is_reverse (bool, optional): Indicate whether to calculate in the reverse
            order of input sequences. Defaults to False.
        time_major (bool): Whether the first dimension of the input means the
            time steps. Defaults to False.

    Inputs:
        - **inputs** (Tensor): A (possibly nested structure of) tensor[s]. The input sequences. If time major is False, the shape is `[batch_size, time_steps, input_size]`. If time major is True, the shape is `[time_steps, batch_size, input_size]` where `input_size` is the input size of the cell.
        - **initial_states** (Tensor|list|tuple, optional): Tensor of a possibly nested structure of tensors, representing the initial state for the rnn cell. If not provided, `cell.get_initial_states` would be called to produce the initial states. Defaults to None.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.
        - **kwargs**: Additional keyword arguments to pass to `forward` of the cell.

    Returns:
        - **outputs** (Tensor|list|tuple): the output sequences. If `time_major` is True, the shape is `[time_steps, batch_size, hidden_size]`, else `[batch_size, time_steps, hidden_size]`.
        - **final_states** (Tensor|list|tuple): final states of the cell. Tensor or a possibly nested structure of tensors which has the same structure as the initial states. Each tensor in final states has the same shape and dtype as the corresponding tensor in initial states.

    Notes:
        This class is a low level API for wrapping an RNN cell into an RNN network.
        Users should take care of the state of the cell. If `initial_states` is
        passed to the `forward` method, make sure that it satisfies the
        requirements of the cell.

    Examples:

        .. code-block:: python

            import paddle

            inputs = paddle.rand((4, 23, 16))
            prev_h = paddle.randn((4, 32))

            cell = paddle.nn.SimpleRNNCell(16, 32)
            rnn = paddle.nn.RNN(cell)
            outputs, final_states = rnn(inputs, prev_h)

            print(outputs.shape)
            print(final_states.shape)

            #[4,23,32]
            #[4,32]

    """

    def __init__(self, cell, is_reverse=False, time_major=False):
        super(RNN, self).__init__()
        self.cell = cell
        if not hasattr(self.cell, "call"):
            # for non-dygraph mode, `rnn` api uses cell.call
            self.cell.call = self.cell.forward
        self.is_reverse = is_reverse
        self.time_major = time_major

    def forward(self,
                inputs,
                initial_states=None,
                sequence_length=None,
                **kwargs):
        final_outputs, final_states = paddle.fluid.layers.rnn(
            self.cell,
            inputs,
            initial_states=initial_states,
            sequence_length=sequence_length,
            time_major=self.time_major,
            is_reverse=self.is_reverse,
            **kwargs)
        return final_outputs, final_states


class BiRNN(Layer):
    r"""
    Wrapper for bidirectional RNN, which builds a bidirectional RNN given the
    forward rnn cell and backward rnn cell. A BiRNN applies forward RNN and
    backward RNN with corresponding cells separately and concatenates the outputs
    along the last axis.

    Parameters:
        cell_fw (RNNCellBase): A RNNCellBase instance used for forward RNN.
        cell_bw (RNNCellBase): A RNNCellBase instance used for backward RNN.
        time_major (bool): Whether the first dimension of the input means the
            time steps. Defaults to False.

    Inputs:
        - **inputs** (Tensor): the input sequences of both RNNs. If time_major is True, the shape is `[time_steps, batch_size, input_size]`, else the shape is `[batch_size, time_steps, input_size]`, where input_size is the input size of both cells.
        - **initial_states** (list|tuple, optional): A tuple/list of the initial states of the forward cell and backward cell. Defaults to None. If not provided, `cell.get_initial_states` would be called to produce the initial states for each cell. Defaults to None.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index are not less than the valid length are treated as paddings.
        - **kwargs**: Additional keyword arguments. Arguments passed to `forward` for each cell.

    Outputs:
        - **outputs** (Tensor): the outputs of the bidirectional RNN. It is the concatenation of the outputs from the forward RNN and backward RNN along the last axis. If time major is True, the shape is `[time_steps, batch_size, size]`, else the shape is `[batch_size, time_steps, size]`, where size is `cell_fw.hidden_size + cell_bw.hidden_size`.
        - **final_states** (tuple): A tuple of the final states of the forward cell and backward cell.

    Notes:
        This class is a low level API for wrapping rnn cells into a BiRNN
        network. Users should take care of the states of the cells.
        If `initial_states` is passed to the `forward` method, make sure that
        it satisfies the requirements of the cells.

    Examples:

        .. code-block:: python

            import paddle

            cell_fw = paddle.nn.LSTMCell(16, 32)
            cell_bw = paddle.nn.LSTMCell(16, 32)
            rnn = paddle.nn.BiRNN(cell_fw, cell_bw)

            inputs = paddle.rand((2, 23, 16))
            outputs, final_states = rnn(inputs)

            print(outputs.shape)
            print(final_states[0][0].shape,len(final_states),len(final_states[0]))

            #[2,23,64]
            #[2,32] 2 2

    """

    def __init__(self, cell_fw, cell_bw, time_major=False):
        super(BiRNN, self).__init__()
        self.cell_fw = cell_fw
        self.cell_bw = cell_bw
        if cell_fw.input_size != cell_bw.input_size:
            raise ValueError("input size of forward cell({}) does not equal "
                             "that of backward cell({})".format(
                                 cell_fw.input_size, cell_bw.input_size))
        for cell in [self.cell_fw, self.cell_bw]:
            if not hasattr(cell, "call"):
                # for non-dygraph mode, `rnn` api uses cell.call
                cell.call = cell.forward
        self.time_major = time_major

    def forward(self,
                inputs,
                initial_states=None,
                sequence_length=None,
                **kwargs):
        if isinstance(initial_states, (list, tuple)):
            assert len(initial_states) == 2, \
                "length of initial_states should be 2 when it is a list/tuple"

        outputs, final_states = paddle.fluid.layers.birnn(
            self.cell_fw, self.cell_bw, inputs, initial_states, sequence_length,
            self.time_major, **kwargs)
        return outputs, final_states


class RNNBase(LayerList):
    r"""
    RNNBase class for RNN networks. It provides `forward`, `flatten_parameters`
    and other common methods for SimpleRNN, LSTM and GRU.
    """

    def __init__(self,
                 mode,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 direction="forward",
                 time_major=False,
                 dropout=0.,
                 weight_ih_attr=None,
                 weight_hh_attr=None,
                 bias_ih_attr=None,
                 bias_hh_attr=None):
        super(RNNBase, self).__init__()
        bidirectional_list = ["bidirectional", "bidirect"]
        self.mode = mode
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_directions = 2 if direction in bidirectional_list else 1
        self.time_major = time_major
        self.num_layers = num_layers
        self.state_components = 2 if mode == "LSTM" else 1

        kwargs = {
            "weight_ih_attr": weight_ih_attr,
            "weight_hh_attr": weight_hh_attr,
            "bias_ih_attr": bias_ih_attr,
            "bias_hh_attr": bias_hh_attr
        }

        if mode == "LSTM":
            rnn_cls = LSTMCell
        elif mode == "GRU":
            rnn_cls = GRUCell
        else:
            rnn_cls = SimpleRNNCell
            kwargs["activation"] = self.activation

        if direction in ["forward"]:
            is_reverse = False
            cell = rnn_cls(input_size, hidden_size, **kwargs)
            self.append(RNN(cell, is_reverse, time_major))
            for i in range(1, num_layers):
                cell = rnn_cls(hidden_size, hidden_size, **kwargs)
                self.append(RNN(cell, is_reverse, time_major))
        elif direction in bidirectional_list:
            cell_fw = rnn_cls(input_size, hidden_size, **kwargs)
            cell_bw = rnn_cls(input_size, hidden_size, **kwargs)
            self.append(BiRNN(cell_fw, cell_bw, time_major))
            for i in range(1, num_layers):
                cell_fw = rnn_cls(2 * hidden_size, hidden_size, **kwargs)
                cell_bw = rnn_cls(2 * hidden_size, hidden_size, **kwargs)
                self.append(BiRNN(cell_fw, cell_bw, time_major))
        else:
            raise ValueError(
                "direction should be forward or bidirect (or bidirectional), "
                "received direction = {}".format(direction))

        self.could_use_cudnn = True
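        # the fused rnn kernel expects exactly four parameters (weight_ih,
        # weight_hh, bias_ih, bias_hh) per layer per direction; if a bias is
        # disabled via ParamAttr, fall back to the composed small-ops path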
        self.could_use_cudnn &= len(self.parameters()) == num_layers * 4 * (
            2 if direction in bidirectional_list else 1)

        # Expose params as RNN's attribute, which can make it compatible when
        # replacing small ops composed rnn with cpp rnn kernel.
        # Moreover, `jit.to_static` assumes params are added by current layer
        # and wouldn't include sublayer's params in current layer, which also
        # requires these params are added to current layer for `jit.save`.
        param_names = []
        for layer in range(self.num_layers):
            for direction in range(self.num_directions):
                suffix = '_reverse' if direction == 1 else ''
                param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}'])
                if bias_ih_attr != False: param_names.append('bias_ih_l{}{}')
                if bias_hh_attr != False: param_names.append('bias_hh_l{}{}')
                param_names = [x.format(layer, suffix) for x in param_names]
        for name, param in zip(param_names, self.parameters()):
            setattr(self, name, param)

        self.flatten_parameters()

    def flatten_parameters(self):
        """
        Resets parameter data pointer to address in continuous memory block for
        cudnn usage.
        """
        if self.could_use_cudnn:
            # layer.parameters() is depth first and ordered
            # for i in layer: for j in direct: w_ih, w_hh, b_ih, b_hh
            # need to reorganize to cudnn param layout:
            # all bias following all weights
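            # e.g. two unidirectional layers are reordered as
            # [w_ih_0, w_hh_0, w_ih_1, w_hh_1, b_ih_0, b_hh_0, b_ih_1, b_hh_1]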
            params = self.parameters(include_sublayers=False)
            shape = [np.prod(param.shape) for param in params]
            self._all_weights = [None] * len(params)
            for i, param in enumerate(params):
                offset = 0 if i % 4 < 2 else (2 * self.num_layers *
                                              self.num_directions)
                layer_idx = i // 4
                self._all_weights[offset + layer_idx * 2 + i % 2] = param
            # Wrap using a list to avoid being registered into params and saved;
            # maybe need a better way to handle this later. Use `create_parameter`
            # to add both to main_program and startup_program for static-graph.
            # Use Constant initializer to avoid affecting the random generator.
            self._flat_weight = [
                self.create_parameter(shape=[np.sum(shape)],
                                      dtype=params[0].dtype,
                                      default_initializer=I.Constant(0.0))
            ]
            # the dropout state may also be hidden to avoid saving it;
            # should the dropout state be persistable for static-graph?
            self._dropout_state = self.create_variable(
                dtype=core.VarDesc.VarType.UINT8)
            if in_dynamic_mode():
                with paddle.no_grad():
                    _legacy_C_ops.coalesce_tensor(self._all_weights,
                                                  self._all_weights,
                                                  self._flat_weight[0],
                                                  "copy_data", True,
                                                  "use_align", False, "dtype",
                                                  params[0].dtype)
                    return
            # for static-graph, append coalesce_tensor into startup program
            with program_guard(default_startup_program(),
                               default_startup_program()):
                with paddle.no_grad():
                    self._helper.append_op(type="coalesce_tensor",
                                           inputs={"Input": self._all_weights},
                                           outputs={
                                               "Output": self._all_weights,
                                               "FusedOutput": self._flat_weight
                                           },
                                           attrs={
                                               "copy_data": True,
                                               "use_align": False,
                                               "dtype": params[0].dtype
                                           })

    def _cudnn_impl(self, inputs, initial_states, sequence_length):
        if not self.time_major:
            inputs = paddle.tensor.transpose(inputs, [1, 0, 2])

        if in_dygraph_mode():
            out, _, state = _C_ops.rnn(
                inputs, initial_states, self._all_weights, sequence_length,
                self._dropout_state, self.dropout, self.num_directions == 2,
                self.input_size, self.hidden_size, self.num_layers, self.mode,
                0, not self.training)
        elif in_dynamic_mode():
            _, _, out, state = _legacy_C_ops.rnn(
                inputs, initial_states, self._all_weights, sequence_length,
                self._dropout_state, self.state_components, 'dropout_prob',
                self.dropout, 'is_bidirec', self.num_directions == 2,
                'input_size', self.input_size, 'hidden_size', self.hidden_size,
                'num_layers', self.num_layers, 'mode', self.mode, 'is_test',
                not self.training)
        else:
            out = self._helper.create_variable_for_type_inference(inputs.dtype)
            state = [
                self._helper.create_variable_for_type_inference(inputs.dtype)
                for i in range(self.state_components)
            ]
            reserve = self._helper.create_variable_for_type_inference(
                dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)

            inputs = {
                'Input': inputs,
                'WeightList': self._all_weights,
                'PreState': initial_states,
                'SequenceLength': sequence_length
            }
            attrs = {
                'dropout_prob': self.dropout,
                'is_bidirec': self.num_directions == 2,
                'input_size': self.input_size,
                'hidden_size': self.hidden_size,
                'num_layers': self.num_layers,
                'mode': self.mode,
                'is_test': not self.training
            }

            outputs = {
                'Out': out,
                'State': state,
                'Reserve': reserve,
                'DropoutState': self._dropout_state,
            }

            self._helper.append_op(type="rnn",
                                   inputs=inputs,
                                   outputs=outputs,
                                   attrs=attrs)

        out = paddle.tensor.transpose(out,
                                      [1, 0, 2]) if not self.time_major else out
        return out, tuple(state) if len(state) > 1 else state[0]

    def forward(self, inputs, initial_states=None, sequence_length=None):
        batch_index = 1 if self.time_major else 0
        dtype = inputs.dtype
        if initial_states is None:
            state_shape = (self.num_layers * self.num_directions, -1,
                           self.hidden_size)
            initial_states = tuple([
                paddle.fluid.layers.fill_constant_batch_size_like(
                    inputs, state_shape, dtype, 0, batch_index, 1)
                for _ in range(self.state_components)
            ])
        else:
            initial_states = [initial_states] if isinstance(
                initial_states, paddle.static.Variable) else initial_states

        if self.could_use_cudnn and (not paddle.device.is_compiled_with_rocm()
                                     or sequence_length is None):
            # Add CPU kernel and dispatch in backend later
            return self._cudnn_impl(inputs, initial_states, sequence_length)

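        # fallback path: run the RNN/BiRNN sub-layers built in __init__ one by
        # one, feeding each layer's output (with dropout applied) to the next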
        states = split_states(initial_states, self.num_directions == 2,
                              self.state_components)
        final_states = []

        for i, rnn_layer in enumerate(self):
            if i > 0:
                inputs = F.dropout(inputs,
                                   self.dropout,
                                   training=self.training,
                                   mode="upscale_in_train")
            outputs, final_state = rnn_layer(inputs, states[i], sequence_length)
            final_states.append(final_state)
            inputs = outputs

        final_states = concat_states(final_states, self.num_directions == 2,
                                     self.state_components)
        return outputs, final_states

    def extra_repr(self):
        main_str = '{input_size}, {hidden_size}'
        if self.num_layers != 1:
            main_str += ', num_layers={num_layers}'
        if self.time_major != False:
            main_str += ', time_major={time_major}'
        if self.dropout != 0:
            main_str += ', dropout={dropout}'
        return main_str.format(**self.__dict__)


class SimpleRNN(RNNBase):
    r"""
    Multilayer Elman network(SimpleRNN). It takes input sequences and initial
    states as inputs, and returns the output sequences and the final states.

    Each layer inside the SimpleRNN maps the input sequences and initial states
    to the output sequences and final states in the following manner: at each
    step, it takes step inputs(:math:`x_{t}`) and previous
    states(:math:`h_{t-1}`) as inputs, and returns step outputs(:math:`y_{t}`)
    and new states(:math:`h_{t}`).

    .. math::

        h_{t} & = act(W_{ih}x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh})

        y_{t} & = h_{t}

    where :math:`act` is for :attr:`activation`.

    Using keyword arguments to construct is recommended.

    Parameters:
        input_size (int): The input size of :math:`x` for the first layer's cell.
        hidden_size (int): The hidden size of :math:`h` for each layer's cell.
        num_layers (int, optional): Number of recurrent layers. Defaults to 1.
        direction (str, optional): The direction of the network. It can be "forward"
            or "bidirect"(or "bidirectional"). When "bidirect", the way to merge
            outputs of forward and backward is concatenating. Defaults to "forward".
        time_major (bool, optional): Whether the first dimension of the input
            means the time steps. If time_major is True, the shape of Tensor is
            [time_steps,batch_size,input_size], otherwise [batch_size, time_steps,input_size].
            Defaults to False. `time_steps` means the length of input sequence.
        dropout (float, optional): The dropout probability. Dropout is applied
            to the input of each layer except for the first layer. The range of
            dropout is from 0 to 1. Defaults to 0.
        activation (str, optional): The activation in each SimpleRNN cell. It can be
            `tanh` or `relu`. Defaults to `tanh`.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih` of each cell. Defaults to None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh` of each cell. Defaults to None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih` of each cell. Defaults to None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh` of each cell. Defaults to None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Inputs:
        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`. `time_steps` means the length of the input sequence.
        - **initial_states** (Tensor, optional): the initial state. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_states is not given, zero initial states are used.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index is not less than the valid length are treated as paddings.

    Returns:

        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, else, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" else 1. `time_steps` means the length of the output sequence.

        - **final_states** (Tensor): final states. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.

    Variables:
        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.
        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.
        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.
        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.

    Examples:

        .. code-block:: python

            import paddle

            rnn = paddle.nn.SimpleRNN(16, 32, 2)

            x = paddle.randn((4, 23, 16))
            prev_h = paddle.randn((2, 4, 32))
            y, h = rnn(x, prev_h)

            print(y.shape)
            print(h.shape)

            #[4,23,32]
            #[2,4,32]
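
            # Illustrative sketch beyond the original example (assumed, not part
            # of the verified docs): the same API with a bidirectional network
            # and padded sequences; `lengths` holds each batch item's valid length.
            birnn = paddle.nn.SimpleRNN(16, 32, 2, direction="bidirect")
            lengths = paddle.to_tensor([23, 20, 18, 15], dtype="int64")
            y_bi, h_bi = birnn(x, sequence_length=lengths)

            print(y_bi.shape)   # [4, 23, 64]  (num_directions * hidden_size)
            print(h_bi.shape)   # [4, 4, 32]   (num_layers * num_directions)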

    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 direction="forward",
                 time_major=False,
                 dropout=0.,
                 activation="tanh",
                 weight_ih_attr=None,
                 weight_hh_attr=None,
                 bias_ih_attr=None,
                 bias_hh_attr=None,
                 name=None):
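        # Map the public `activation` argument onto the internal mode string
        # used by RNNBase.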
        if activation == "tanh":
            mode = "RNN_TANH"
        elif activation == "relu":
            mode = "RNN_RELU"
        else:
            raise ValueError("Unknown activation '{}'".format(activation))
        self.activation = activation
        super(SimpleRNN,
              self).__init__(mode, input_size, hidden_size, num_layers,
                             direction, time_major, dropout, weight_ih_attr,
                             weight_hh_attr, bias_ih_attr, bias_hh_attr)


class LSTM(RNNBase):
    r"""
    Multilayer LSTM. It takes a sequence and an initial state as inputs, and
    returns the output sequences and the final states.

    Each layer inside the LSTM maps the input sequences and initial states
    to the output sequences and final states in the following manner: at each
    step, it takes step inputs(:math:`x_{t}`) and previous
    states(:math:`h_{t-1}, c_{t-1}`) as inputs, and returns step
    outputs(:math:`y_{t}`) and new states(:math:`h_{t}, c_{t}`).

    .. math::

        i_{t} & = \sigma(W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})

        f_{t} & = \sigma(W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})

        o_{t} & = \sigma(W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})

        \widetilde{c}_{t} & = \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})

        c_{t} & = f_{t} * c_{t-1} + i_{t} * \widetilde{c}_{t}

        h_{t} & = o_{t} * \tanh(c_{t})

        y_{t} & = h_{t}

    where :math:`\sigma` is the sigmoid function, and * is the elementwise
    multiplication operator.

    Using keyword arguments to construct is recommended.

    Parameters:
        input_size (int): The input size of :math:`x` for the first layer's cell.
        hidden_size (int): The hidden size of :math:`h` for each layer's cell.
        num_layers (int, optional): Number of recurrent layers. Defaults to 1.
        direction (str, optional): The direction of the network. It can be "forward"
            or "bidirect" (or "bidirectional"). When "bidirect", the way to merge
            outputs of forward and backward is concatenating. Defaults to "forward".
        time_major (bool, optional): Whether the first dimension of the input
            means the time steps. If time_major is True, the shape of the Tensor is
            [time_steps, batch_size, input_size], otherwise [batch_size, time_steps, input_size].
            Defaults to False. `time_steps` means the length of the input sequence.
        dropout (float, optional): The dropout probability. Dropout is applied
            to the input of each layer except for the first layer. The range of
            dropout is from 0 to 1. Defaults to 0.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih` of each cell. Default: None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh` of each cell. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih` of each cell. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh` of each cell. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Inputs:
        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`. `time_steps` means the length of the input sequence.
        - **initial_states** (list|tuple, optional): the initial state, a list/tuple of (h, c), the shape of each is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_states is not given, zero initial states are used.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index is not less than the valid length are treated as paddings.

    Returns:

        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, else, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" else 1. `time_steps` means the length of the output sequence.

        - **final_states** (tuple): the final state, a tuple of two tensors, h and c. The shape of each is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.

    Variables:
        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.
        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.
        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.
        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.

    Examples:

        .. code-block:: python

            import paddle

            rnn = paddle.nn.LSTM(16, 32, 2)

            x = paddle.randn((4, 23, 16))
            prev_h = paddle.randn((2, 4, 32))
            prev_c = paddle.randn((2, 4, 32))
            y, (h, c) = rnn(x, (prev_h, prev_c))

            print(y.shape)
            print(h.shape)
            print(c.shape)

            #[4,23,32]
            #[2,4,32]
            #[2,4,32]
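
            # Illustrative sketch beyond the original example (assumed, not part
            # of the verified docs): explicit zero initial states plus padded
            # sequences via `sequence_length`.
            zero_h = paddle.zeros((2, 4, 32))
            zero_c = paddle.zeros((2, 4, 32))
            lengths = paddle.to_tensor([23, 19, 12, 7], dtype="int64")
            y2, (h2, c2) = rnn(x, (zero_h, zero_c), lengths)

            print(y2.shape)   # [4, 23, 32]
            print(h2.shape)   # [2, 4, 32]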

    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 direction="forward",
                 time_major=False,
                 dropout=0.,
                 weight_ih_attr=None,
                 weight_hh_attr=None,
                 bias_ih_attr=None,
                 bias_hh_attr=None,
                 name=None):
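        # Parameter creation and the forward pass are handled by RNNBase with
        # mode "LSTM"; this subclass only selects the mode.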
        super(LSTM,
              self).__init__("LSTM", input_size, hidden_size, num_layers,
                             direction, time_major, dropout, weight_ih_attr,
                             weight_hh_attr, bias_ih_attr, bias_hh_attr)


class GRU(RNNBase):
    r"""
    Multilayer GRU. It takes input sequences and initial states as inputs, and
    returns the output sequences and the final states.

    Each layer inside the GRU maps the input sequences and initial states
    to the output sequences and final states in the following manner: at each
    step, it takes step inputs(:math:`x_{t}`) and previous
    states(:math:`h_{t-1}`) as inputs, and returns step outputs(:math:`y_{t}`)
    and new states(:math:`h_{t}`).

    .. math::

        r_{t} & = \sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr})

        z_{t} & = \sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz})

        \widetilde{h}_{t} & = \tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc}))

        h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \widetilde{h}_{t}

        y_{t} & = h_{t}

    where :math:`\sigma` is the sigmoid function, and * is the elementwise
    multiplication operator.

    Using keyword arguments to construct is recommended.

    Parameters:
        input_size (int): The input size of :math:`x` for the first layer's cell.
        hidden_size (int): The hidden size of :math:`h` for each layer's cell.
        num_layers (int, optional): Number of recurrent layers. Defaults to 1.
        direction (str, optional): The direction of the network. It can be "forward"
            or "bidirect" (or "bidirectional"). When "bidirect", the way to merge
            outputs of forward and backward is concatenating. Defaults to "forward".
        time_major (bool, optional): Whether the first dimension of the input
            means the time steps. If time_major is True, the shape of the Tensor is
            [time_steps, batch_size, input_size], otherwise [batch_size, time_steps, input_size].
            Defaults to False. `time_steps` means the length of the input sequence.
        dropout (float, optional): The dropout probability. Dropout is applied
            to the input of each layer except for the first layer. The range of
            dropout is from 0 to 1. Defaults to 0.
        weight_ih_attr (ParamAttr, optional): The parameter attribute for
            `weight_ih` of each cell. Default: None.
        weight_hh_attr (ParamAttr, optional): The parameter attribute for
            `weight_hh` of each cell. Default: None.
        bias_ih_attr (ParamAttr, optional): The parameter attribute for the
            `bias_ih` of each cell. Default: None.
        bias_hh_attr (ParamAttr, optional): The parameter attribute for the
            `bias_hh` of each cell. Default: None.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Inputs:
        - **inputs** (Tensor): the input sequence. If `time_major` is True, the shape is `[time_steps, batch_size, input_size]`, else, the shape is `[batch_size, time_steps, input_size]`. `time_steps` means the length of the input sequence.
        - **initial_states** (Tensor, optional): the initial state. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. If initial_states is not given, zero initial states are used. Defaults to None.
        - **sequence_length** (Tensor, optional): shape `[batch_size]`, dtype: int64 or int32. The valid lengths of input sequences. Defaults to None. If `sequence_length` is not None, the inputs are treated as padded sequences. In each input sequence, elements whose time step index is not less than the valid length are treated as paddings.

    Returns:

        - **outputs** (Tensor): the output sequence. If `time_major` is True, the shape is `[time_steps, batch_size, num_directions * hidden_size]`, else, the shape is `[batch_size, time_steps, num_directions * hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" else 1. `time_steps` means the length of the output sequence.

        - **final_states** (Tensor): final states. The shape is `[num_layers * num_directions, batch_size, hidden_size]`. Note that `num_directions` is 2 if direction is "bidirectional" (the index of forward states are 0, 2, 4, 6... and the index of backward states are 1, 3, 5, 7...), else 1.

    Variables:
        - **weight_ih_l[k]**: the learnable input-hidden weights of the k-th layer. If `k = 0`, the shape is `[hidden_size, input_size]`. Otherwise, the shape is `[hidden_size, num_directions * hidden_size]`.
        - **weight_hh_l[k]**: the learnable hidden-hidden weights of the k-th layer, with shape `[hidden_size, hidden_size]`.
        - **bias_ih_l[k]**: the learnable input-hidden bias of the k-th layer, with shape `[hidden_size]`.
        - **bias_hh_l[k]**: the learnable hidden-hidden bias of the k-th layer, with shape `[hidden_size]`.

    Examples:

        .. code-block:: python

            import paddle

            rnn = paddle.nn.GRU(16, 32, 2)

            x = paddle.randn((4, 23, 16))
            prev_h = paddle.randn((2, 4, 32))
            y, h = rnn(x, prev_h)

            print(y.shape)
            print(h.shape)

            #[4,23,32]
            #[2,4,32]
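
            # Illustrative sketch beyond the original example (assumed, not part
            # of the verified docs): the same GRU built with time-major inputs,
            # where the first input dimension is time_steps.
            rnn_tm = paddle.nn.GRU(16, 32, 2, time_major=True)
            x_tm = paddle.randn((23, 4, 16))   # [time_steps, batch_size, input_size]
            y_tm, h_tm = rnn_tm(x_tm)

            print(y_tm.shape)   # [23, 4, 32]
            print(h_tm.shape)   # [2, 4, 32]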

    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 direction="forward",
                 time_major=False,
                 dropout=0.,
                 weight_ih_attr=None,
                 weight_hh_attr=None,
                 bias_ih_attr=None,
                 bias_hh_attr=None,
                 name=None):
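        # Parameter creation and the forward pass are handled by RNNBase with
        # mode "GRU"; this subclass only selects the mode.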
        super(GRU,
              self).__init__("GRU", input_size, hidden_size, num_layers,
                             direction, time_major, dropout, weight_ih_attr,
                             weight_hh_attr, bias_ih_attr, bias_hh_attr)