import os
from scipy.io.wavfile import write
from parakeet.g2p.en import text_to_sequence
import numpy as np
from tqdm import tqdm
from tensorboardX import SummaryWriter
from ruamel import yaml
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
from pathlib import Path
import argparse
from parse import add_config_options_to_parser
from pprint import pprint
from collections import OrderedDict
from parakeet import audio
from parakeet.models.transformer_tts.vocoder import Vocoder
from parakeet.models.transformer_tts.transformer_tts import TransformerTTS

def load_checkpoint(step, model_path):
    """Load dygraph parameters saved at *step* and normalize their names.

    Parameters saved from a DataParallel-wrapped model carry a
    '_layers.' prefix; strip it so the dict loads into a bare model.

    Args:
        step: checkpoint step identifier used as the file name.
        model_path: directory containing the saved dygraph checkpoint.

    Returns:
        OrderedDict mapping normalized parameter names to their values.
    """
    prefix = '_layers.'
    model_dict, _ = fluid.dygraph.load_dygraph(os.path.join(model_path, step))
    normalized = OrderedDict()
    for name in model_dict:
        key = name[len(prefix):] if name.startswith(prefix) else name
        normalized[key] = model_dict[name]
    return normalized
def synthesis(text_input, args):
    """Synthesize speech from *text_input* with TransformerTTS + vocoder.

    Args:
        text_input: sentence (string) to synthesize.
        args: parsed CLI namespace; must provide use_gpu, config_path,
            log_dir, sample_path, checkpoint_path, transformer_step,
            vocoder_step, batch_size and max_len.

    Side effects: logs an audio summary under <log_dir>/synthesis and
    writes 'test.wav' under <sample_path>.
    """
    place = (fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace())

    with open(args.config_path) as f:
        cfg = yaml.load(f, Loader=yaml.Loader)

    # Tensorboard writer for the synthesized audio.
    # makedirs(exist_ok=True) replaces os.mkdir, which crashed when the
    # directory already existed or intermediate directories were missing.
    path = os.path.join(args.log_dir, 'synthesis')
    os.makedirs(path, exist_ok=True)
    writer = SummaryWriter(path)

    with dg.guard(place):
        # Separate unique-name guards keep the two models' parameter
        # names from colliding, matching how they were saved.
        with fluid.unique_name.guard():
            model = TransformerTTS(cfg)
            model.set_dict(
                load_checkpoint(
                    str(args.transformer_step),
                    os.path.join(args.checkpoint_path, "transformer")))
            model.eval()

        with fluid.unique_name.guard():
            model_vocoder = Vocoder(cfg, args.batch_size)
            model_vocoder.set_dict(
                load_checkpoint(
                    str(args.vocoder_step),
                    os.path.join(args.checkpoint_path, "vocoder")))
            model_vocoder.eval()

        # Initial inputs: encoded text plus a single all-zero mel frame.
        # Mel channel count comes from the config instead of a hard-coded
        # 80, keeping it consistent with the AudioProcessor below; the
        # frame is built as float32 directly instead of casting float64.
        num_mels = cfg['audio']['num_mels']
        text = np.asarray(text_to_sequence(text_input))
        text = fluid.layers.unsqueeze(dg.to_variable(text), [0])
        mel_input = dg.to_variable(
            np.zeros([1, 1, num_mels], dtype=np.float32))
        pos_text = np.arange(1, text.shape[1] + 1)
        pos_text = fluid.layers.unsqueeze(dg.to_variable(pos_text), [0])

        # Autoregressive decoding: each step feeds all frames produced so
        # far and appends the newest postnet frame to the input.
        pbar = tqdm(range(args.max_len))
        for i in pbar:
            pos_mel = np.arange(1, mel_input.shape[1] + 1)
            pos_mel = fluid.layers.unsqueeze(dg.to_variable(pos_mel), [0])
            mel_pred, postnet_pred, attn_probs, stop_preds, attn_enc, attn_dec = model(
                text, mel_input, pos_text, pos_mel)
            mel_input = fluid.layers.concat(
                [mel_input, postnet_pred[:, -1:, :]], axis=1)

        # Mel spectrogram -> linear magnitude spectrogram.
        mag_pred = model_vocoder(postnet_pred)

        _ljspeech_processor = audio.AudioProcessor(
            sample_rate=cfg['audio']['sr'],
            num_mels=cfg['audio']['num_mels'],
            min_level_db=cfg['audio']['min_level_db'],
            ref_level_db=cfg['audio']['ref_level_db'],
            n_fft=cfg['audio']['n_fft'],
            win_length=cfg['audio']['win_length'],
            hop_length=cfg['audio']['hop_length'],
            power=cfg['audio']['power'],
            preemphasis=cfg['audio']['preemphasis'],
            signal_norm=True,
            symmetric_norm=False,
            max_norm=1.,
            mel_fmin=0,
            mel_fmax=None,
            clip_norm=True,
            griffin_lim_iters=60,
            do_trim_silence=False,
            sound_norm=False)

        # Drop the batch dim and transpose to the (freq, time) layout the
        # processor expects before inverting to a waveform.
        wav = _ljspeech_processor.inv_spectrogram(
            fluid.layers.transpose(
                fluid.layers.squeeze(mag_pred, [0]), [1, 0]).numpy())

        writer.add_audio(text_input, wav, 0, cfg['audio']['sr'])
        os.makedirs(args.sample_path, exist_ok=True)
        write(os.path.join(args.sample_path, 'test.wav'),
              cfg['audio']['sr'], wav)
    writer.close()

if __name__ == '__main__':
    # Build the CLI, attach the project's config options, then run a
    # fixed demo sentence through the synthesis pipeline.
    arg_parser = argparse.ArgumentParser(description="Synthesis model")
    add_config_options_to_parser(arg_parser)
    parsed = arg_parser.parse_args()
    synthesis("Transformer model is so fast!", parsed)