# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
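"""FastSpeech synthesis script: generate a waveform for a piece of text with a
trained model and log the audio to TensorBoard."""
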
import os
from tensorboardX import SummaryWriter
from collections import OrderedDict
import argparse
from parse import add_config_options_to_parser
from pprint import pprint
from ruamel import yaml
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
from parakeet.g2p.en import text_to_sequence
from parakeet import audio
from parakeet.models.fastspeech.fastspeech import FastSpeech
from parakeet.models.transformer_tts.utils import (get_non_pad_mask,
                                                   get_attn_key_pad_mask)


def load_checkpoint(step, model_path):
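    """Load a dygraph checkpoint and strip the '_layers.' prefix (added when
    the model was saved wrapped, e.g. in DataParallel) from parameter names."""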
    model_dict, _ = fluid.dygraph.load_dygraph(os.path.join(model_path, step))
    new_state_dict = OrderedDict()
    for param in model_dict:
        if param.startswith('_layers.'):
            new_state_dict[param[8:]] = model_dict[param]
        else:
            new_state_dict[param] = model_dict[param]
    return new_state_dict


def synthesis(text_input, args):
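    """Synthesize a waveform for `text_input` with a trained FastSpeech model
    and log the generated audio to TensorBoard."""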
    place = (fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace())

    # prepare the TensorBoard log directory
    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)
    path = os.path.join(args.log_dir, 'synthesis')

    # load the model and audio configuration
    with open(args.config_path) as f:
        cfg = yaml.load(f, Loader=yaml.Loader)

    writer = SummaryWriter(path)

    with dg.guard(place):
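        # build FastSpeech from the config and restore the trained weights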
        model = FastSpeech(cfg)
        model.set_dict(
            load_checkpoint(
                str(args.fastspeech_step),
                os.path.join(args.checkpoint_path, "fastspeech")))
        model.eval()

        # convert the input text to symbol ids and add a batch dimension
        text = np.asarray(text_to_sequence(text_input))
        text = np.expand_dims(text, axis=0)
        # build 1-based position indices and the encoder padding masks
        pos_text = np.arange(1, text.shape[1] + 1)
        pos_text = np.expand_dims(pos_text, axis=0)
        enc_non_pad_mask = get_non_pad_mask(pos_text).astype(np.float32)
        enc_slf_attn_mask = get_attn_key_pad_mask(pos_text,
                                                  text).astype(np.float32)

        # wrap the numpy arrays as Paddle dygraph Variables
        text = dg.to_variable(text)
        pos_text = dg.to_variable(pos_text)
        enc_non_pad_mask = dg.to_variable(enc_non_pad_mask)
        enc_slf_attn_mask = dg.to_variable(enc_slf_attn_mask)

        # run FastSpeech; alpha rescales predicted durations (speech speed)
        mel_output, mel_output_postnet = model(
            text,
            pos_text,
            alpha=args.alpha,
            enc_non_pad_mask=enc_non_pad_mask,
            enc_slf_attn_mask=enc_slf_attn_mask,
            dec_non_pad_mask=None,
            dec_slf_attn_mask=None)

        # audio processor that inverts the mel spectrogram back to a waveform
        _ljspeech_processor = audio.AudioProcessor(
            sample_rate=cfg['audio']['sr'],
            num_mels=cfg['audio']['num_mels'],
            min_level_db=cfg['audio']['min_level_db'],
            ref_level_db=cfg['audio']['ref_level_db'],
            n_fft=cfg['audio']['n_fft'],
            win_length=cfg['audio']['win_length'],
            hop_length=cfg['audio']['hop_length'],
            power=cfg['audio']['power'],
            preemphasis=cfg['audio']['preemphasis'],
            signal_norm=True,
            symmetric_norm=False,
            max_norm=1.,
            mel_fmin=0,
            mel_fmax=None,
            clip_norm=True,
            griffin_lim_iters=60,
            do_trim_silence=False,
            sound_norm=False)

        # drop the batch dim, transpose to (num_mels, T) and invert with Griffin-Lim
        mel_output_postnet = fluid.layers.transpose(
            fluid.layers.squeeze(mel_output_postnet, [0]), [1, 0])
        wav = _ljspeech_processor.inv_melspectrogram(
            mel_output_postnet.numpy())
        # log the synthesized waveform to TensorBoard under the input text
        writer.add_audio(text_input, wav, 0, cfg['audio']['sr'])
        print("Synthesis completed !!!")
    writer.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Synthesize with FastSpeech")
    add_config_options_to_parser(parser)
    args = parser.parse_args()
    synthesis("Transformer model is so fast!", args)