# decoder.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import numpy as np  # np.float32 dtype casts in forward()
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.layers as layers  # layers.squeeze / layers.sigmoid in forward()
from parakeet.modules.utils import *
from parakeet.modules.multihead_attention import MultiheadAttention
from parakeet.modules.ffn import PositionwiseFeedForward
from parakeet.models.transformer_tts.prenet import PreNet
from parakeet.models.transformer_tts.post_convnet import PostConvNet

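# For reference, a minimal sketch of what get_sinusoid_encoding_table
# (imported from parakeet.modules.utils above) is assumed to return,
# following the "Attention Is All You Need" encoding: even channels use
# sin, odd channels use cos, and the padding row is zeroed. This local
# helper is illustrative only and is not used by the model below.
def _sinusoid_table_sketch(n_position, d_hid, padding_idx=None):
    position = np.arange(n_position)[:, None]  # (n_position, 1)
    channel = np.arange(d_hid)[None, :]        # (1, d_hid)
    # Angle rate 1 / 10000^(2 * (i // 2) / d_hid), paired per sin/cos channel.
    angle = position / np.power(10000, 2 * (channel // 2) / d_hid)
    table = np.zeros((n_position, d_hid), dtype=np.float32)
    table[:, 0::2] = np.sin(angle[:, 0::2])    # even channels
    table[:, 1::2] = np.cos(angle[:, 1::2])    # odd channels
    if padding_idx is not None:
        table[padding_idx] = 0.                # zero the pad row
    return table

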
class Decoder(dg.Layer):
    """Transformer-TTS decoder.

    Stacks masked self-attention, encoder-decoder attention and
    position-wise feed-forward blocks, then projects the result to
    mel-spectrogram frames and stop-token probabilities.
    """

    def __init__(self, num_hidden, config, num_head=4):
        super(Decoder, self).__init__()
        self.num_hidden = num_hidden
        param = fluid.ParamAttr()
        # Learnable scale for the positional encoding (initialized to 1.0).
        self.alpha = self.create_parameter(
            shape=(1, ),
            attr=param,
            dtype='float32',
            default_initializer=fluid.initializer.ConstantInitializer(
                value=1.0))
        # Fixed (non-trainable) sinusoid position embedding table.
        self.pos_inp = get_sinusoid_encoding_table(
            1024, self.num_hidden, padding_idx=0)
        self.pos_emb = dg.Embedding(
            size=[1024, num_hidden],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    self.pos_inp),
                trainable=False))
        # Decoder pre-net: projects previous mel frames into the model dimension.
        self.decoder_prenet = PreNet(
            input_size=config['audio']['num_mels'],
            hidden_size=num_hidden * 2,
            output_size=num_hidden,
            dropout_rate=0.2)

        # Uniform bias-initialization bound, 1/sqrt(fan_in).
        k = math.sqrt(1 / num_hidden)
        self.linear = dg.Linear(
            num_hidden,
            num_hidden,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        # Three decoder self-attention blocks. Plain Python lists do not
        # register sublayers automatically in dygraph, hence the
        # add_sublayer calls below.
        self.selfattn_layers = [
            MultiheadAttention(num_hidden, num_hidden // num_head,
                               num_hidden // num_head) for _ in range(3)
        ]
        for i, layer in enumerate(self.selfattn_layers):
            self.add_sublayer("self_attn_{}".format(i), layer)

        # Three encoder-decoder attention blocks.
        self.attn_layers = [
            MultiheadAttention(num_hidden, num_hidden // num_head,
                               num_hidden // num_head) for _ in range(3)
        ]
        for i, layer in enumerate(self.attn_layers):
            self.add_sublayer("attn_{}".format(i), layer)

        # Position-wise feed-forward blocks, one per attention pair.
        self.ffns = [
            PositionwiseFeedForward(
                num_hidden, num_hidden * num_head, filter_size=1)
            for _ in range(3)
        ]
        for i, layer in enumerate(self.ffns):
            self.add_sublayer("ffns_{}".format(i), layer)

        # Projects each decoder state to `outputs_per_step` mel frames at
        # once (Tacotron-style reduction factor).
        self.mel_linear = dg.Linear(
            num_hidden,
            config['audio']['num_mels'] * config['audio']['outputs_per_step'],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
        # Per-frame stop-token logit.
        self.stop_linear = dg.Linear(
            num_hidden,
            1,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        # Five-layer convolutional post-net that refines the coarse mel
        # output; its result is added back as a residual in forward().
        self.postconvnet = PostConvNet(
            config['audio']['num_mels'],
            config['hidden_size'],
            filter_size=5,
            padding=4,
            num_conv=5,
            outputs_per_step=config['audio']['outputs_per_step'],
            use_cudnn=True)

    def forward(self, key, value, query, c_mask, positional):
        # key/value: encoder outputs; query: previous mel frames (teacher
        # forcing); c_mask: encoder non-pad mask; positional: decoder
        # position indices, where 0 marks padding.
        # Get the decoder mask with a triangular (causal) matrix.
        if fluid.framework._dygraph_tracer()._train_mode:
            m_mask = get_non_pad_mask(positional)
            mask = get_attn_key_pad_mask((positional == 0).astype(np.float32),
                                         query)
            triu_tensor = dg.to_variable(
                get_triu_tensor(query.numpy(), query.numpy())).astype(
                    np.float32)
            mask = mask + triu_tensor
            mask = fluid.layers.cast(mask == 0, np.float32)

            # (batch_size, decoder_len, encoder_len)
            zero_mask = get_attn_key_pad_mask(
                layers.squeeze(c_mask, [-1]), query)
        else:
            mask = get_triu_tensor(query.numpy(),
                                   query.numpy()).astype(np.float32)
            mask = fluid.layers.cast(dg.to_variable(mask == 0), np.float32)
            m_mask, zero_mask = None, None
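        # Illustrative note (an assumption about get_triu_tensor's
        # convention, not from the original source): for a 3-frame decoder
        # it marks the future as
        #     [[0, 1, 1],
        #      [0, 0, 1],
        #      [0, 0, 0]]
        # so after the `mask == 0` cast a 1 means "may attend", i.e. each
        # frame sees itself and the past — the causal mask consumed by the
        # self-attention layers below.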

        # Decoder pre-network
        query = self.decoder_prenet(query)

        # "Centered position": project the pre-net output before adding
        # the position encoding.
        query = self.linear(query)

        # Add the scaled position embedding.
        positional = self.pos_emb(positional)
        query = positional * self.alpha + query

        # Positional dropout.
        query = fluid.layers.dropout(query, 0.1)

        # Attention decoder-decoder, encoder-decoder
        selfattn_list = list()
        attn_list = list()

        for selfattn, attn, ffn in zip(self.selfattn_layers, self.attn_layers,
                                       self.ffns):
            query, attn_dec = selfattn(
                query, query, query, mask=mask, query_mask=m_mask)
            query, attn_dot = attn(
                key, value, query, mask=zero_mask, query_mask=m_mask)
            query = ffn(query)
            selfattn_list.append(attn_dec)
            attn_list.append(attn_dot)
        # Mel linear projection
        mel_out = self.mel_linear(query)
        # Post Mel Network
        out = self.postconvnet(mel_out)
        out = mel_out + out

        # Stop tokens
        stop_tokens = self.stop_linear(query)
        stop_tokens = layers.squeeze(stop_tokens, [-1])
        stop_tokens = layers.sigmoid(stop_tokens)

        return mel_out, out, attn_list, stop_tokens, selfattn_list
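

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). Assumes a
# paddle 1.x fluid dygraph environment with parakeet importable; the config
# keys mirror the ones read in __init__, and all shapes here are assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    config = {
        'audio': {'num_mels': 80, 'outputs_per_step': 1},
        'hidden_size': 256,
    }
    with dg.guard():
        decoder = Decoder(num_hidden=256, config=config)
        batch, enc_len, dec_len = 2, 13, 7
        # Encoder outputs serve as both key and value.
        encoder_out = dg.to_variable(
            np.random.randn(batch, enc_len, 256).astype(np.float32))
        # Previous mel frames (teacher-forcing input).
        mel_input = dg.to_variable(
            np.random.randn(batch, dec_len, 80).astype(np.float32))
        # Encoder non-pad mask, kept with a trailing singleton dim because
        # forward() squeezes axis -1.
        c_mask = dg.to_variable(np.ones((batch, enc_len, 1), dtype=np.float32))
        # 1-based decoder position indices; 0 is reserved for padding.
        positional = dg.to_variable(
            np.tile(np.arange(1, dec_len + 1, dtype=np.int64), (batch, 1)))
        mel_out, out, attn_list, stop_tokens, selfattn_list = decoder(
            encoder_out, encoder_out, mel_input, c_mask, positional)
        print(mel_out.shape, out.shape, stop_tokens.shape)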