# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle.fluid.dygraph as dg
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from parakeet.models.transformer_tts.utils import *
from parakeet.modules.multihead_attention import MultiheadAttention
from parakeet.modules.ffn import PositionwiseFeedForward
from parakeet.models.transformer_tts.prenet import PreNet
from parakeet.models.transformer_tts.post_convnet import PostConvNet


class Decoder(dg.Layer):
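    """Transformer-TTS mel decoder.

    A mel-frame pre-net followed by three blocks of masked self-attention,
    encoder-decoder attention and a position-wise FFN, with linear heads for
    mel frames and stop tokens, plus a PostConvNet that refines the coarse
    mel output through a residual connection.

    Args:
        num_hidden (int): hidden size of the decoder; should be divisible
            by num_head.
        config (dict): expects config['audio']['num_mels'],
            config['audio']['outputs_per_step'] and config['hidden_size'].
        num_head (int): number of attention heads. Defaults to 4.
    """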
    def __init__(self, num_hidden, config, num_head=4):
        super(Decoder, self).__init__()
        self.num_hidden = num_hidden
        self.num_head = num_head
        param = fluid.ParamAttr()
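        # Learnable scale for the positional encoding; applied in forward as
        # query = positional * alpha + query.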
        self.alpha = self.create_parameter(
            shape=(1, ),
            attr=param,
            dtype='float32',
            default_initializer=fluid.initializer.ConstantInitializer(
                value=1.0))
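        # Fixed sinusoidal position table (1024 positions) loaded into a
        # non-trainable embedding.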
        self.pos_inp = get_sinusoid_encoding_table(
            1024, self.num_hidden, padding_idx=0)
        self.pos_emb = dg.Embedding(
            size=[1024, num_hidden],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    self.pos_inp),
                trainable=False))
        self.decoder_prenet = PreNet(
            input_size=config['audio']['num_mels'],
            hidden_size=num_hidden * 2,
            output_size=num_hidden,
            dropout_rate=0.2)
        k = math.sqrt(1 / num_hidden)
        self.linear = dg.Linear(
            num_hidden,
            num_hidden,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        self.selfattn_layers = [
            MultiheadAttention(num_hidden, num_hidden // num_head,
                               num_hidden // num_head) for _ in range(3)
        ]
        for i, layer in enumerate(self.selfattn_layers):
            self.add_sublayer("self_attn_{}".format(i), layer)
        self.attn_layers = [
            MultiheadAttention(num_hidden, num_hidden // num_head,
                               num_hidden // num_head) for _ in range(3)
        ]
        for i, layer in enumerate(self.attn_layers):
            self.add_sublayer("attn_{}".format(i), layer)
        self.ffns = [
            PositionwiseFeedForward(
                num_hidden, num_hidden * num_head, filter_size=1)
            for _ in range(3)
        ]
        for i, layer in enumerate(self.ffns):
            self.add_sublayer("ffns_{}".format(i), layer)
        self.mel_linear = dg.Linear(
            num_hidden,
            config['audio']['num_mels'] * config['audio']['outputs_per_step'],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
        self.stop_linear = dg.Linear(
            num_hidden,
            1,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        self.postconvnet = PostConvNet(
            config['audio']['num_mels'],
            config['hidden_size'],
            filter_size=5,
            padding=4,
            num_conv=5,
            outputs_per_step=config['audio']['outputs_per_step'],
            use_cudnn=True)

    def forward(self,
                key,
                value,
                query,
                positional,
                mask,
                m_mask=None,
                m_self_mask=None,
                zero_mask=None):
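        """Run one decoder pass.

        Args:
            key, value: encoder outputs, used as the keys/values of the
                encoder-decoder attention.
            query: decoder input mel frames, fed through the pre-net.
            positional: position indices looked up in the positional
                embedding table.
            mask: triangular decoder self-attention mask.
            m_self_mask: query padding mask for the self-attention
                (train mode only).
            m_mask: query padding mask for the encoder-decoder attention
                (train mode only).
            zero_mask: key padding mask for the encoder-decoder attention
                (train mode only).

        Returns:
            mel_out: coarse mel output from the linear projection.
            out: mel output refined by the post-convnet residual.
            attn_list: encoder-decoder attention weights per block.
            stop_tokens: per-frame stop probabilities.
            selfattn_list: decoder self-attention weights per block.
        """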

        # In train mode, tile the triangular decoder mask and the padding
        # masks over the attention heads; at inference the padding masks
        # are dropped and only the decoder mask is used.
        if fluid.framework._dygraph_tracer()._train_mode:
            m_mask = layers.expand(m_mask, [self.num_head, 1, key.shape[1]])
            m_self_mask = layers.expand(m_self_mask,
                                        [self.num_head, 1, query.shape[1]])
            mask = layers.expand(mask, [self.num_head, 1, 1])
            zero_mask = layers.expand(zero_mask, [self.num_head, 1, 1])
        else:
            m_mask, m_self_mask, zero_mask = None, None, None

        # Decoder pre-network
        query = self.decoder_prenet(query)

        # Centered position
        query = self.linear(query)

        # Get position embedding
        positional = self.pos_emb(positional)
        query = positional * self.alpha + query

        # positional dropout
        query = fluid.layers.dropout(
            query, 0.1, dropout_implementation='upscale_in_train')

        # Attention decoder-decoder, encoder-decoder
        selfattn_list = list()
        attn_list = list()

        for selfattn, attn, ffn in zip(self.selfattn_layers, self.attn_layers,
                                       self.ffns):
            query, attn_dec = selfattn(
                query, query, query, mask=mask, query_mask=m_self_mask)
            query, attn_dot = attn(
                key, value, query, mask=zero_mask, query_mask=m_mask)
            query = ffn(query)
            selfattn_list.append(attn_dec)
            attn_list.append(attn_dot)

        # Mel linear projection
        mel_out = self.mel_linear(query)
        # Post Mel Network
        out = self.postconvnet(mel_out)
        out = mel_out + out

        # Stop tokens
        stop_tokens = self.stop_linear(query)
        stop_tokens = layers.squeeze(stop_tokens, [-1])
        stop_tokens = layers.sigmoid(stop_tokens)

        return mel_out, out, attn_list, stop_tokens, selfattn_list
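

# Minimal construction sketch (hypothetical values; the real ones come from
# the project's config file, and building the attention masks is omitted):
#
#     cfg = {
#         'hidden_size': 256,
#         'audio': {'num_mels': 80, 'outputs_per_step': 1},
#     }
#     with dg.guard():
#         decoder = Decoder(num_hidden=256, config=cfg)
#         mel_out, out, attns, stop_tokens, self_attns = decoder(
#             key, value, query, positional, mask,
#             m_mask=m_mask, m_self_mask=m_self_mask, zero_mask=zero_mask)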