fastspeech.py 6.6 KB
Newer Older
L
lifuchen 已提交
1
import math
2
import numpy as np
L
lifuchen 已提交
3 4 5
import paddle.fluid.dygraph as dg
import paddle.fluid as fluid
from parakeet.g2p.text.symbols import symbols
6
from parakeet.models.transformer_tts.utils import *
L
lifuchen 已提交
7
from parakeet.models.transformer_tts.post_convnet import PostConvNet
L
lifuchen 已提交
8
from parakeet.models.fastspeech.length_regulator import LengthRegulator
L
lifuchen 已提交
9 10
from parakeet.models.fastspeech.encoder import Encoder
from parakeet.models.fastspeech.decoder import Decoder
L
lifuchen 已提交
11 12 13 14 15 16 17

class FastSpeech(dg.Layer):
    def __init__(self, cfg):
        """FastSpeech acoustic model: a feed-forward network that maps text
        (character/phoneme ids) to mel spectrograms.

        Built from three configurable stages:
          * an ``Encoder`` of FFT blocks over the input symbols,
          * a ``LengthRegulator`` that expands encoder states to spectrum
            length using phoneme durations (ground-truth at train time,
            predicted at inference time),
          * a ``Decoder`` of FFT blocks, followed by a linear projection to
            mel bins and a ``PostConvNet`` residual refinement stack.

        Args:
            cfg (dict): model configuration. Keys read here include
                'max_seq_len', 'fs_hidden_size', 'encoder_n_layer',
                'encoder_head', 'encoder_conv1d_filter_size',
                'decoder_n_layer', 'decoder_head',
                'decoder_conv1d_filter_size', 'fft_conv1d_filter',
                'fft_conv1d_padding', 'duration_predictor_output_size',
                'duration_predictor_filter_size', 'dropout', and the nested
                'audio' dict ('num_mels', 'outputs_per_step').
        """
        super(FastSpeech, self).__init__()

        # Symbol encoder. len(symbols)+1 reserves one extra embedding id —
        # presumably index 0 is padding; confirm against Encoder's embedding.
        # Per-head dims d_k/d_v split the hidden size evenly across heads.
        self.encoder = Encoder(n_src_vocab=len(symbols)+1,
                               len_max_seq=cfg['max_seq_len'],
                               n_layers=cfg['encoder_n_layer'],
                               n_head=cfg['encoder_head'],
                               d_k=cfg['fs_hidden_size'] // cfg['encoder_head'],
                               d_v=cfg['fs_hidden_size'] // cfg['encoder_head'],
                               d_model=cfg['fs_hidden_size'],
                               d_inner=cfg['encoder_conv1d_filter_size'],
                               fft_conv1d_kernel=cfg['fft_conv1d_filter'],
                               fft_conv1d_padding=cfg['fft_conv1d_padding'],
                               dropout=0.1)
        # Length regulator: expands hidden states along time per phoneme
        # duration; also hosts the duration predictor (see its config keys).
        self.length_regulator = LengthRegulator(input_size=cfg['fs_hidden_size'], 
                                                out_channels=cfg['duration_predictor_output_size'], 
                                                filter_size=cfg['duration_predictor_filter_size'], 
                                                dropout=cfg['dropout'])
        # Decoder mirrors the encoder's FFT-block configuration.
        self.decoder = Decoder(len_max_seq=cfg['max_seq_len'],
                                n_layers=cfg['decoder_n_layer'],
                                n_head=cfg['decoder_head'],
                                d_k=cfg['fs_hidden_size'] // cfg['decoder_head'],
                                d_v=cfg['fs_hidden_size'] // cfg['decoder_head'],
                                d_model=cfg['fs_hidden_size'],
                                d_inner=cfg['decoder_conv1d_filter_size'],
                                fft_conv1d_kernel=cfg['fft_conv1d_filter'],
                                fft_conv1d_padding=cfg['fft_conv1d_padding'],
                                dropout=0.1)
        # Mel projection init: Xavier weights, uniform bias in [-k, k] with
        # k = sqrt(1 / fan_in) (the usual Linear default bound).
        self.weight = fluid.ParamAttr(initializer = fluid.initializer.XavierInitializer())
        k = math.sqrt(1 / cfg['fs_hidden_size'])
        self.bias = fluid.ParamAttr(initializer = fluid.initializer.Uniform(low=-k, high=k))
        # Projects decoder states to num_mels * outputs_per_step values per
        # decoder step (multiple mel frames emitted per step).
        self.mel_linear = dg.Linear(cfg['fs_hidden_size'], 
                                    cfg['audio']['num_mels']* cfg['audio']['outputs_per_step'],
                                    param_attr = self.weight,
                                    bias_attr = self.bias,)
        # Residual post-net over the coarse mel output (added back in forward).
        # Hidden width / kernel / depth are hard-coded here rather than taken
        # from cfg.
        self.postnet = PostConvNet(n_mels=cfg['audio']['num_mels'],
                 num_hidden=512,
                 filter_size=5,
                 padding=int(5 / 2),
                 num_conv=5,
                 outputs_per_step=cfg['audio']['outputs_per_step'],
                 use_cudnn=True,
                 dropout=0.1,
                 batchnorm_last=True)

    def forward(self, character, text_pos, enc_non_pad_mask, dec_non_pad_mask,
                 enc_slf_attn_mask=None, dec_slf_attn_mask=None,
                 mel_pos=None, length_target=None, alpha=1.0):
        """
        FastSpeech model.
        
        Args:
            character (Variable): Shape(B, T_text), dtype: float32. The input text
                characters. T_text means the timesteps of input characters.
            text_pos (Variable): Shape(B, T_text), dtype: int64. The input text
                position. T_text means the timesteps of input characters.
            mel_pos (Variable, optional): Shape(B, T_mel),
                dtype: int64. The spectrum position. T_mel means the timesteps of input spectrum.
                Required in training mode; unused at inference.
            length_target (Variable, optional): Shape(B, T_text),
                dtype: int64. The duration of phoneme compute from pretrained transformerTTS.
                Required in training mode (teacher-forced durations).
            alpha (Constant): 
                dtype: float32. The hyperparameter to determine the length of the expanded sequence 
                mel, thereby controlling the voice speed.

        Returns:
            mel_output (Variable), Shape(B, mel_T, C), the mel output before postnet.
            mel_output_postnet (Variable), Shape(B, mel_T, C), the mel output after postnet.
            duration_predictor_output (Variable), Shape(B, text_T), the duration of phoneme compute 
            with duration predictor. (Training mode only.)
            enc_slf_attn_list (Variable), Shape(B, text_T, text_T), the encoder self attention list.
            (Training mode only.)
            dec_slf_attn_list (Variable), Shape(B, mel_T, mel_T), the decoder self attention list.
            (Training mode only.)
        """

        encoder_output, enc_slf_attn_list = self.encoder(character, text_pos, enc_non_pad_mask, slf_attn_mask=enc_slf_attn_mask)
        # NOTE(review): train/eval is detected via a *private* dygraph tracer
        # attribute; fragile across Paddle versions — confirm before upgrading.
        if fluid.framework._dygraph_tracer()._train_mode:
            
            # Training: expand encoder states with ground-truth durations
            # (teacher forcing); the duration predictor's output is returned
            # so its loss can be computed against length_target.
            length_regulator_output, duration_predictor_output = self.length_regulator(encoder_output,
                                                                                       target=length_target,
                                                                                       alpha=alpha)
            decoder_output, dec_slf_attn_list = self.decoder(length_regulator_output, mel_pos, 
                                                             dec_non_pad_mask, 
                                                             slf_attn_mask=dec_slf_attn_mask)

            # Post-net is residual: refined output = postnet(mel) + mel.
            mel_output = self.mel_linear(decoder_output)
            mel_output_postnet = self.postnet(mel_output) + mel_output

            return mel_output, mel_output_postnet, duration_predictor_output, enc_slf_attn_list, dec_slf_attn_list
        else:
            # Inference: no target durations; the length regulator expands
            # using its predicted durations (scaled by alpha) and also returns
            # the positions of the expanded (decoder-side) sequence.
            length_regulator_output, decoder_pos = self.length_regulator(encoder_output, alpha=alpha)
            # Build the decoder self-attention mask from positions on the
            # host side (numpy round-trip), keeping positions where the upper-
            # triangular tensor is 0.
            slf_attn_mask = get_triu_tensor(decoder_pos.numpy(), decoder_pos.numpy()).astype(np.float32)
            slf_attn_mask = fluid.layers.cast(dg.to_variable(slf_attn_mask == 0), np.float32)
            # NOTE(review): slf_attn_mask is already a Variable after the cast
            # above; this extra to_variable call looks redundant — confirm.
            slf_attn_mask = dg.to_variable(slf_attn_mask)
            # Non-pad mask: 1.0 where position id != 0, with a trailing
            # singleton dim for broadcasting.
            dec_non_pad_mask = fluid.layers.unsqueeze((decoder_pos != 0).astype(np.float32), [-1])
            decoder_output, _ = self.decoder(length_regulator_output, decoder_pos, dec_non_pad_mask, 
                                             slf_attn_mask=slf_attn_mask)

            # Same residual post-net refinement as in training.
            mel_output = self.mel_linear(decoder_output)
            mel_output_postnet = self.postnet(mel_output) + mel_output

            return mel_output, mel_output_postnet