# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from scipy.io.wavfile import write
from parakeet.g2p.en import text_to_sequence
import numpy as np
from tqdm import tqdm
from matplotlib import cm
from tensorboardX import SummaryWriter
from ruamel import yaml
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
from pathlib import Path
import argparse
from parse import add_config_options_to_parser
from pprint import pprint
from collections import OrderedDict
from parakeet.models.transformer_tts.utils import *
from parakeet import audio
from parakeet.models.transformer_tts.vocoder import Vocoder
from parakeet.models.transformer_tts.transformer_tts import TransformerTTS


def load_checkpoint(step, model_path):
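    """Load a dygraph checkpoint saved as `step` under `model_path` and strip
    the '_layers.' prefix (present when the checkpoint was saved from a
    data-parallel wrapped model) from parameter names."""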
    model_dict, _ = fluid.dygraph.load_dygraph(os.path.join(model_path, step))
    new_state_dict = OrderedDict()
    for param in model_dict:
        if param.startswith('_layers.'):
            new_state_dict[param[8:]] = model_dict[param]
        else:
            new_state_dict[param] = model_dict[param]
    return new_state_dict


def synthesis(text_input, args):
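    """Synthesize a single sentence: run TransformerTTS autoregressively to
    predict a mel spectrogram, convert it to a waveform with the vocoder and
    Griffin-Lim, and log attention plots and audio to TensorBoard."""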
    place = (fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace())

    with open(args.config_path) as f:
        cfg = yaml.load(f, Loader=yaml.Loader)

    # TensorBoard writer for attention plots and the synthesized audio
    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)
    path = os.path.join(args.log_dir, 'synthesis')

    writer = SummaryWriter(path)

    with dg.guard(place):
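        # Build the TransformerTTS acoustic model, restore its weights from the
        # checkpoint and switch it to inference mode.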
        with fluid.unique_name.guard():
            model = TransformerTTS(cfg)
            model.set_dict(
                load_checkpoint(
                    str(args.transformer_step),
                    os.path.join(args.checkpoint_path, "transformer")))
            model.eval()

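        # Build the vocoder network that maps the predicted mel spectrogram to
        # a linear magnitude spectrogram and restore its weights.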
        with fluid.unique_name.guard():
            model_vocoder = Vocoder(cfg, args.batch_size)
            model_vocoder.set_dict(
                load_checkpoint(
                    str(args.vocoder_step),
                    os.path.join(args.checkpoint_path, "vocoder")))
            model_vocoder.eval()
        # prepare the inputs: text id sequence, an all-zero initial mel frame,
        # and position indices
        text = np.asarray(text_to_sequence(text_input))
        text = fluid.layers.unsqueeze(dg.to_variable(text), [0])
        mel_input = dg.to_variable(np.zeros([1, 1, 80])).astype(np.float32)
        pos_text = np.arange(1, text.shape[1] + 1)
        pos_text = fluid.layers.unsqueeze(dg.to_variable(pos_text), [0])

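        # Autoregressive decoding: start from a single all-zero mel frame and,
        # at each step, feed every frame generated so far, then append the
        # newest post-net frame to the decoder input.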
        pbar = tqdm(range(args.max_len))
        for i in pbar:
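            # Causal self-attention mask: upper-triangular (future) positions
            # get a large negative bias so the decoder cannot attend to them.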
            dec_slf_mask = get_triu_tensor(
                mel_input.numpy(), mel_input.numpy()).astype(np.float32)
            dec_slf_mask = fluid.layers.cast(
                dg.to_variable(dec_slf_mask != 0), np.float32) * (-2**32 + 1)
            pos_mel = np.arange(1, mel_input.shape[1] + 1)
            pos_mel = fluid.layers.unsqueeze(dg.to_variable(pos_mel), [0])
            mel_pred, postnet_pred, attn_probs, stop_preds, attn_enc, attn_dec = model(
                text, mel_input, pos_text, pos_mel, dec_slf_mask)
            mel_input = fluid.layers.concat(
                [mel_input, postnet_pred[:, -1:, :]], axis=1)

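        # Map the final mel prediction to a linear magnitude spectrogram.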
        mag_pred = model_vocoder(postnet_pred)

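        # Audio processor that inverts the linear spectrogram back to a
        # waveform with Griffin-Lim, using the audio settings from the config.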
        _ljspeech_processor = audio.AudioProcessor(
            sample_rate=cfg['audio']['sr'],
            num_mels=cfg['audio']['num_mels'],
            min_level_db=cfg['audio']['min_level_db'],
            ref_level_db=cfg['audio']['ref_level_db'],
            n_fft=cfg['audio']['n_fft'],
            win_length=cfg['audio']['win_length'],
            hop_length=cfg['audio']['hop_length'],
            power=cfg['audio']['power'],
            preemphasis=cfg['audio']['preemphasis'],
            signal_norm=True,
            symmetric_norm=False,
            max_norm=1.,
            mel_fmin=0,
            mel_fmax=None,
            clip_norm=True,
            griffin_lim_iters=60,
            do_trim_silence=False,
            sound_norm=False)

        wav = _ljspeech_processor.inv_spectrogram(
            fluid.layers.transpose(
                fluid.layers.squeeze(mag_pred, [0]), [1, 0]).numpy())
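        # Log the first four heads of each attention map (encoder-decoder
        # alignments, encoder self-attention, decoder self-attention) to
        # TensorBoard as images.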
        global_step = 0
        for i, prob in enumerate(attn_probs):
            for j in range(4):
                x = np.uint8(cm.viridis(prob.numpy()[j]) * 255)
                writer.add_image(
                    'Attention_%d_0' % global_step,
                    x,
                    i * 4 + j,
                    dataformats="HWC")

        for i, prob in enumerate(attn_enc):
            for j in range(4):
                x = np.uint8(cm.viridis(prob.numpy()[j]) * 255)
                writer.add_image(
                    'Attention_enc_%d_0' % global_step,
                    x,
                    i * 4 + j,
                    dataformats="HWC")

        for i, prob in enumerate(attn_dec):
            for j in range(4):
                x = np.uint8(cm.viridis(prob.numpy()[j]) * 255)
                writer.add_image(
                    'Attention_dec_%d_0' % global_step,
                    x,
                    i * 4 + j,
                    dataformats="HWC")
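        # Log the generated audio to TensorBoard and also save it as a wav file.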
        writer.add_audio(text_input, wav, 0, cfg['audio']['sr'])
        if not os.path.exists(args.sample_path):
            os.mkdir(args.sample_path)
        write(
            os.path.join(args.sample_path, 'test.wav'), cfg['audio']['sr'],
            wav)
    writer.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Synthesis model")
    add_config_options_to_parser(parser)
    args = parser.parse_args()
    synthesis("Parakeet stands for Paddle PARAllel text-to-speech toolkit.",
              args)