# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle.fluid.dygraph as dg
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from parakeet.models.transformer_tts.utils import *
from parakeet.modules.multihead_attention import MultiheadAttention
from parakeet.modules.ffn import PositionwiseFeedForward
from parakeet.models.transformer_tts.prenet import PreNet
from parakeet.models.transformer_tts.post_convnet import PostConvNet


class Decoder(dg.Layer):
    def __init__(self, num_hidden, config, num_head=4, n_layers=3):
        """Decoder layer of TransformerTTS.

        Args:
            num_hidden (int): the hidden size of the decoder.
            config: the yaml configs used in the decoder.
            num_head (int, optional): the number of heads in multihead attention. Defaults to 4.
            n_layers (int, optional): the number of multihead attention layers. Defaults to 3.
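
        Examples:
            A minimal construction sketch; the config values below are
            assumptions chosen only to cover the keys this class reads
            (config['audio']['num_mels'], config['audio']['outputs_per_step'],
            config['hidden_size']):

                import paddle.fluid.dygraph as dg

                config = {
                    'hidden_size': 256,
                    'audio': {'num_mels': 80, 'outputs_per_step': 1},
                }
                with dg.guard():
                    decoder = Decoder(num_hidden=256, config=config,
                                      num_head=4, n_layers=3)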
        """
        super(Decoder, self).__init__()
        self.num_hidden = num_hidden
        self.num_head = num_head
        param = fluid.ParamAttr()
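
        # learnable scale applied to the positional encoding in forward()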
        self.alpha = self.create_parameter(
            shape=(1, ),
            attr=param,
            dtype='float32',
            default_initializer=fluid.initializer.ConstantInitializer(
                value=1.0))
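
        # sinusoid position encoding table, loaded into a frozen embedding (trainable=False)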
        self.pos_inp = get_sinusoid_encoding_table(
            1024, self.num_hidden, padding_idx=0)
        self.pos_emb = dg.Embedding(
            size=[1024, num_hidden],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    self.pos_inp),
                trainable=False))
        self.decoder_prenet = PreNet(
            input_size=config['audio']['num_mels'],
            hidden_size=num_hidden * 2,
            output_size=num_hidden,
            dropout_rate=0.2)
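
        # bound for the uniform initialization of the bias terms in the linear layers below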
        k = math.sqrt(1 / num_hidden)
        self.linear = dg.Linear(
            num_hidden,
            num_hidden,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        self.selfattn_layers = [
            MultiheadAttention(num_hidden, num_hidden // num_head,
                               num_hidden // num_head) for _ in range(n_layers)
        ]
        for i, layer in enumerate(self.selfattn_layers):
            self.add_sublayer("self_attn_{}".format(i), layer)
        self.attn_layers = [
            MultiheadAttention(num_hidden, num_hidden // num_head,
                               num_hidden // num_head) for _ in range(n_layers)
        ]
        for i, layer in enumerate(self.attn_layers):
            self.add_sublayer("attn_{}".format(i), layer)
        self.ffns = [
            PositionwiseFeedForward(
                num_hidden, num_hidden * num_head, filter_size=1)
            for _ in range(n_layers)
        ]
        for i, layer in enumerate(self.ffns):
            self.add_sublayer("ffns_{}".format(i), layer)
        self.mel_linear = dg.Linear(
            num_hidden,
            config['audio']['num_mels'] * config['audio']['outputs_per_step'],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
        self.stop_linear = dg.Linear(
            num_hidden,
            1,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        self.postconvnet = PostConvNet(
            config['audio']['num_mels'],
            config['hidden_size'],
            filter_size=5,
            padding=4,
            num_conv=5,
            outputs_per_step=config['audio']['outputs_per_step'],
            use_cudnn=True)

    def forward(self,
                key,
                value,
                query,
                positional,
                mask,
                m_mask=None,
                m_self_mask=None,
                zero_mask=None):
        """
        Compute decoder outputs.
        
        Args:
            key (Variable): shape(B, T_text, C), dtype float32, the input key of the decoder,
                where T_text is the number of timesteps of the input text.
            value (Variable): shape(B, T_text, C), dtype float32, the input value of the decoder.
            query (Variable): shape(B, T_mel, C), dtype float32, the input query of the decoder,
                where T_mel is the number of timesteps of the input spectrum.
            positional (Variable): shape(B, T_mel), dtype int64, the spectrum positions.
            mask (Variable): shape(B, T_mel, T_mel), dtype int64, the mask of decoder self-attention.
            m_mask (Variable, optional): shape(B, T_mel, 1), dtype int64, the query mask of encoder-decoder attention. Defaults to None.
            m_self_mask (Variable, optional): shape(B, T_mel, 1), dtype int64, the query mask of decoder self-attention. Defaults to None.
            zero_mask (Variable, optional): shape(B, T_mel, T_text), dtype int64, the mask of encoder-decoder attention. Defaults to None.
                
        Returns:
            mel_out (Variable): shape(B, T_mel, C), the decoder output after the mel linear projection.
            out (Variable): shape(B, T_mel, C), the decoder output after the post mel network.
            attn_list (list[Variable]): len(n_layers), the encoder-decoder attention list.
            stop_tokens (Variable): shape(B, T_mel), the stop token probabilities of the output.
            selfattn_list (list[Variable]): len(n_layers), the decoder self-attention list.
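
        Examples:
            A call sketch; the inputs are assumed to be dygraph Variables with
            the shapes documented above (construction of the masks and
            positions is omitted here):

                mel_out, out, attn_list, stop_tokens, selfattn_list = decoder(
                    key, value, query, positional, mask,
                    m_mask=m_mask, m_self_mask=m_self_mask, zero_mask=zero_mask)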
        """

        # in train mode, expand the masks (the self-attention mask is built from a
        # triangular matrix) across attention heads; at inference the masks are set to None

        if fluid.framework._dygraph_tracer()._train_mode:
            m_mask = layers.expand(m_mask, [self.num_head, 1, key.shape[1]])
            m_self_mask = layers.expand(m_self_mask,
                                        [self.num_head, 1, query.shape[1]])
            mask = layers.expand(mask, [self.num_head, 1, 1])
            zero_mask = layers.expand(zero_mask, [self.num_head, 1, 1])

        else:
            m_mask, m_self_mask, zero_mask = None, None, None

        # Decoder pre-network
        query = self.decoder_prenet(query)

        # Centered position
        query = self.linear(query)

        # Get position embedding
        positional = self.pos_emb(positional)
        query = positional * self.alpha + query

        # positional dropout
        query = fluid.layers.dropout(
            query, 0.1, dropout_implementation='upscale_in_train')

        # Attention decoder-decoder, encoder-decoder
        selfattn_list = list()
        attn_list = list()

        for selfattn, attn, ffn in zip(self.selfattn_layers, self.attn_layers,
                                       self.ffns):
            query, attn_dec = selfattn(
                query, query, query, mask=mask, query_mask=m_self_mask)
            query, attn_dot = attn(
                key, value, query, mask=zero_mask, query_mask=m_mask)
            query = ffn(query)
            selfattn_list.append(attn_dec)
            attn_list.append(attn_dot)

        # Mel linear projection
        mel_out = self.mel_linear(query)
        # Post Mel Network
        out = self.postconvnet(mel_out)
        out = mel_out + out

        # Stop tokens
        stop_tokens = self.stop_linear(query)
        stop_tokens = layers.squeeze(stop_tokens, [-1])
        stop_tokens = layers.sigmoid(stop_tokens)

        return mel_out, out, attn_list, stop_tokens, selfattn_list