# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid.dygraph as dg
import paddle.fluid as fluid
import paddle.fluid.layers as layers

from parakeet.models.transformer_tts.utils import *
from parakeet.models.fastspeech.fft_block import FFTBlock


class Decoder(dg.Layer):
    def __init__(self,
                 len_max_seq,
                 n_layers,
                 n_head,
                 d_k,
                 d_v,
                 d_model,
                 d_inner,
                 fft_conv1d_kernel,
                 fft_conv1d_padding,
                 dropout=0.1):
        super(Decoder, self).__init__()

        n_position = len_max_seq + 1
        self.n_head = n_head
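        # Fixed sinusoidal position encodings, served through a non-trainable
        # embedding lookup (index 0 is reserved for padding).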
        self.pos_inp = get_sinusoid_encoding_table(
            n_position, d_model, padding_idx=0)
        self.position_enc = dg.Embedding(
            size=[n_position, d_model],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    self.pos_inp),
                trainable=False))
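        # A stack of n_layers feed-forward Transformer (FFT) blocks.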
        self.layer_stack = [
            FFTBlock(
                d_model,
                d_inner,
                n_head,
                d_k,
                d_v,
                fft_conv1d_kernel,
                fft_conv1d_padding,
                dropout=dropout) for _ in range(n_layers)
        ]
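        # Register each block as a sublayer so its parameters are tracked.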
        for i, layer in enumerate(self.layer_stack):
            self.add_sublayer('fft_{}'.format(i), layer)

    def forward(self, enc_seq, enc_pos, non_pad_mask, slf_attn_mask=None):
        """
        Decoder layer of FastSpeech.
        
        Args:
            enc_seq (Variable), Shape(B, text_T, C), dtype: float32. 
                The output of length regulator.
            enc_pos (Variable, optional): Shape(B, T_mel),
                dtype: int64. The spectrum position. T_mel means the timesteps of input spectrum.
        Returns:
            dec_output (Variable), Shape(B, mel_T, C), the decoder output.
            dec_slf_attn_list (Variable), Shape(B, mel_T, mel_T), the decoder self attention list.
        """
        dec_slf_attn_list = []
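        # Repeat the attention mask once per attention head.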
        slf_attn_mask = layers.expand(slf_attn_mask, [self.n_head, 1, 1])

        # -- Forward: add position encoding to the length-regulated sequence.
        dec_output = enc_seq + self.position_enc(enc_pos)

        for dec_layer in self.layer_stack:
            dec_output, dec_slf_attn = dec_layer(
                dec_output,
                non_pad_mask=non_pad_mask,
                slf_attn_mask=slf_attn_mask)
            dec_slf_attn_list += [dec_slf_attn]

        return dec_output, dec_slf_attn_list
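

# A minimal smoke-test sketch (illustrative only). The hyperparameter values
# below are assumptions chosen for demonstration, not the settings of the
# FastSpeech recipe in this repository, and the mask conventions are assumed
# to match what FFTBlock expects. Note that forward() expands slf_attn_mask
# unconditionally, so a mask must be passed even though it defaults to None.
if __name__ == "__main__":
    import numpy as np

    with dg.guard():
        decoder = Decoder(
            len_max_seq=2048,
            n_layers=6,
            n_head=2,
            d_k=64,
            d_v=64,
            d_model=384,
            d_inner=1536,
            fft_conv1d_kernel=3,
            fft_conv1d_padding=1)

        B, T_mel, C = 2, 100, 384
        enc_seq = dg.to_variable(
            np.random.randn(B, T_mel, C).astype("float32"))
        # Positions are 1-based; index 0 is the padding index.
        enc_pos = dg.to_variable(
            np.tile(np.arange(1, T_mel + 1), (B, 1)).astype("int64"))
        non_pad_mask = dg.to_variable(
            np.ones((B, T_mel, 1), dtype="float32"))
        # An all-zero mask, i.e. no position is masked out.
        slf_attn_mask = dg.to_variable(
            np.zeros((B, T_mel, T_mel), dtype="float32"))

        dec_output, dec_slf_attn_list = decoder(
            enc_seq, enc_pos, non_pad_mask, slf_attn_mask)
        print(dec_output.shape)  # expected: [2, 100, 384]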