"""Contains DeepSpeech2 layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle.v2 as paddle


def conv_bn_layer(input, filter_size, num_channels_in, num_channels_out, stride,
                  padding, act):
    """Convolution layer with batch normalization.

    :param input: Input layer.
    :type input: LayerOutput
    :param filter_size: The x dimension of a filter kernel. Or input a tuple for
                        two image dimensions.
    :type filter_size: int|tuple|list
    :param num_channels_in: Number of input channels.
    :type num_channels_in: int
    :param num_channels_out: Number of output channels.
    :type num_channels_out: int
    :param stride: The x dimension of the stride. Or input a tuple for two
                   image dimensions.
    :type stride: int|tuple|list
    :param padding: The x dimension of the padding. Or input a tuple for two
                    image dimensions.
    :type padding: int|tuple|list
    :param act: Activation type.
    :type act: BaseActivation
    :return: Batch norm layer after convolution layer.
    :rtype: LayerOutput
    """
    conv_layer = paddle.layer.img_conv(
        input=input,
        filter_size=filter_size,
        num_channels=num_channels_in,
        num_filters=num_channels_out,
        stride=stride,
        padding=padding,
        act=paddle.activation.Linear(),
        bias_attr=False)
    return paddle.layer.batch_norm(input=conv_layer, act=act)


def bidirectional_simple_rnn_bn_layer(name, input, size, act):
    """Bidirectonal simple rnn layer with sequence-wise batch normalization.
    The batch normalization is only performed on input-state weights.

    :param name: Name of the layer.
    :type name: string
    :param input: Input layer.
    :type input: LayerOutput
    :param size: Number of RNN cells.
    :type size: int
    :param act: Activation type.
    :type act: BaseActivation
    :return: Bidirectional simple rnn layer.
    :rtype: LayerOutput
    """
    # input-hidden projections (separate weights for each direction).
    input_proj_forward = paddle.layer.fc(
        input=input, size=size, act=paddle.activation.Linear(), bias_attr=False)
    input_proj_backward = paddle.layer.fc(
        input=input, size=size, act=paddle.activation.Linear(), bias_attr=False)
    # batch norm is only performed on input-state projection
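    # (i.e. the sequence-wise batch normalization from the docstring; the
    # recurrent hidden-hidden weights are left un-normalized)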
    input_proj_bn_forward = paddle.layer.batch_norm(
        input=input_proj_forward, act=paddle.activation.Linear())
    input_proj_bn_backward = paddle.layer.batch_norm(
        input=input_proj_backward, act=paddle.activation.Linear())
    # forward and backward in time
    forward_simple_rnn = paddle.layer.recurrent(
        input=input_proj_bn_forward, act=act, reverse=False)
    backward_simple_rnn = paddle.layer.recurrent(
        input=input_proj_bn_backward, act=act, reverse=True)
    return paddle.layer.concat(input=[forward_simple_rnn, backward_simple_rnn])


def bidirectional_gru_bn_layer(name, input, size, act):
    """Bidirectonal gru layer with sequence-wise batch normalization.
    The batch normalization is only performed on input-state weights.

    :param name: Name of the layer.
    :type name: string
    :param input: Input layer.
    :type input: LayerOutput
    :param size: Number of RNN cells.
    :type size: int
    :param act: Activation type.
    :type act: BaseActivation
    :return: Bidirectional gru layer.
    :rtype: LayerOutput
    """
    # input-hidden projections (separate weights for each direction).
    input_proj_forward = paddle.layer.fc(
        input=input,
        size=size * 3,
        act=paddle.activation.Linear(),
        bias_attr=False)
    input_proj_backward = paddle.layer.fc(
        input=input,
        size=size * 3,
        act=paddle.activation.Linear(),
        bias_attr=False)
    # batch norm is only performed on input-state projection
    input_proj_bn_forward = paddle.layer.batch_norm(
        input=input_proj_forward, act=paddle.activation.Linear())
    input_proj_bn_backward = paddle.layer.batch_norm(
        input=input_proj_backward, act=paddle.activation.Linear())
    # forward and backward in time
    forward_gru = paddle.layer.grumemory(
        input=input_proj_bn_forward, act=act, reverse=False)
    backward_gru = paddle.layer.grumemory(
        input=input_proj_bn_backward, act=act, reverse=True)
    return paddle.layer.concat(input=[forward_gru, backward_gru])


def conv_group(input, num_stacks):
    """Convolution group with stacked convolution layers.

    :param input: Input layer.
    :type input: LayerOutput
    :param num_stacks: Number of stacked convolution layers.
    :type num_stacks: int
    :return: Output layer of the convolution group.
    :rtype: LayerOutput
    """
    conv = conv_bn_layer(
        input=input,
        filter_size=(11, 41),
        num_channels_in=1,
        num_channels_out=32,
        stride=(2, 2),
        padding=(5, 20),
        act=paddle.activation.Relu())
    for i in xrange(num_stacks - 1):
        conv = conv_bn_layer(
            input=conv,
            filter_size=(11, 21),
            num_channels_in=32,
            num_channels_out=32,
            stride=(1, 2),
            padding=(5, 10),
            act=paddle.activation.Relu())
    output_num_channels = 32
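    # Each stacked conv layer halves the frequency (height) dimension
    # (stride 2 in y); the closed-form height below is consistent with an
    # input spectrogram height of 161 bins.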
    output_height = 160 // pow(2, num_stacks) + 1
    return conv, output_num_channels, output_height


def rnn_group(input, size, num_stacks, use_gru):
    """RNN group with stacked bidirectional simple RNN layers.

    :param input: Input layer.
    :type input: LayerOutput
    :param size: Number of RNN cells in each layer.
    :type size: int
    :param num_stacks: Number of stacked rnn layers.
    :type num_stacks: int
    :param use_gru: Use gru if set True. Use simple rnn if set False.
    :type use_gru: bool
    :return: Output layer of the RNN group.
    :rtype: LayerOutput
    """
    output = input
    for i in xrange(num_stacks):
        if use_gru:
            output = bidirectional_gru_bn_layer(
                name=str(i),
                input=output,
                size=size,
                act=paddle.activation.Relu())
        else:
            output = bidirectional_simple_rnn_bn_layer(
                name=str(i),
                input=output,
                size=size,
                act=paddle.activation.Relu())
    return output


def deep_speech2(audio_data,
                 text_data,
                 dict_size,
                 num_conv_layers=2,
                 num_rnn_layers=3,
                 rnn_size=256,
                 use_gru=True):
    """
    The whole DeepSpeech2 model structure (a simplified version).

    :param audio_data: Audio spectrogram data layer.
    :type audio_data: LayerOutput
    :param text_data: Transcription text data layer.
    :type text_data: LayerOutput
    :param dict_size: Dictionary size for tokenized transcription.
    :type dict_size: int
    :param num_conv_layers: Number of stacking convolution layers.
    :type num_conv_layers: int
    :param num_rnn_layers: Number of stacking RNN layers.
    :type num_rnn_layers: int
    :param rnn_size: RNN layer size (number of RNN cells).
    :type rnn_size: int
    :param use_gru: Use gru if set True. Use simple rnn if set False.
    :type use_gru: bool
    :return: A tuple of the output probability layer (after softmax) and the
             ctc cost layer.
    :rtype: tuple of LayerOutput
    """
    # convolution group
    conv_group_output, conv_group_num_channels, conv_group_height = conv_group(
        input=audio_data, num_stacks=num_conv_layers)
    # convert data from convolution feature map to sequence of vectors
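    # (with block_x=1 and block_y equal to the full feature-map height, every
    # column of the feature map becomes one time step of the sequence)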
    conv2seq = paddle.layer.block_expand(
        input=conv_group_output,
        num_channels=conv_group_num_channels,
        stride_x=1,
        stride_y=1,
        block_x=1,
        block_y=conv_group_height)
    # rnn group
    rnn_group_output = rnn_group(
        input=conv2seq,
        size=rnn_size,
        num_stacks=num_rnn_layers,
        use_gru=use_gru)
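    # output projection over dict_size + 1 classes; the extra class is the
    # CTC blank label (index dict_size, see warp_ctc below)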
    fc = paddle.layer.fc(
        input=rnn_group_output,
        size=dict_size + 1,
        act=paddle.activation.Linear(),
        bias_attr=True)
    # probability distribution with softmax
    log_probs = paddle.layer.mixed(
        input=paddle.layer.identity_projection(input=fc),
        act=paddle.activation.Softmax())
    # ctc cost
    ctc_loss = paddle.layer.warp_ctc(
        input=fc,
        label=text_data,
        size=dict_size + 1,
        blank=dict_size,
        norm_by_times=True)
    return log_probs, ctc_loss
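

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the training pipeline). It
# shows how the model above might be wired to data layers. The data layer
# names, the 161x161 spectrogram shape and `vocab_size` are assumptions made
# here for illustration; the real shapes come from the data provider.
#
#     import paddle.v2 as paddle
#
#     paddle.init(use_gpu=False, trainer_count=1)
#     vocab_size = 29  # hypothetical character vocabulary size
#     audio_data = paddle.layer.data(
#         name="audio_spectrogram",
#         type=paddle.data_type.dense_vector(161 * 161))
#     text_data = paddle.layer.data(
#         name="transcript_text",
#         type=paddle.data_type.integer_value_sequence(vocab_size))
#     log_probs, ctc_loss = deep_speech2(
#         audio_data=audio_data,
#         text_data=text_data,
#         dict_size=vocab_size,
#         num_conv_layers=2,
#         num_rnn_layers=3,
#         rnn_size=256,
#         use_gru=True)
# ---------------------------------------------------------------------------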