# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
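"""Text-to-speech end-to-end synthesis script.

Converts input sentences to phoneme (and optionally tone) IDs with a text
frontend, predicts mel spectrograms with an acoustic model, generates
waveforms with a vocoder, and writes one .wav file per utterance.
"""
# Example invocation (a sketch only: the config/checkpoint/dict paths below are
# placeholders and must point to your own downloaded or trained models):
#
#   python synthesize_e2e.py \
#       --am=fastspeech2_csmsc \
#       --am_config=fastspeech2_csmsc/default.yaml \
#       --am_ckpt=fastspeech2_csmsc/snapshot.pdz \
#       --am_stat=fastspeech2_csmsc/speech_stats.npy \
#       --phones_dict=fastspeech2_csmsc/phone_id_map.txt \
#       --voc=pwgan_csmsc \
#       --voc_config=pwgan_csmsc/default.yaml \
#       --voc_ckpt=pwgan_csmsc/snapshot.pdz \
#       --voc_stat=pwgan_csmsc/feats_stats.npy \
#       --lang=zh \
#       --text=sentences.txt \
#       --output_dir=output \
#       --ngpu=1
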
import argparse
from pathlib import Path

import paddle
import soundfile as sf
import yaml
from timer import timer
from yacs.config import CfgNode

from paddlespeech.t2s.exps.syn_utils import am_to_static
from paddlespeech.t2s.exps.syn_utils import get_am_inference
from paddlespeech.t2s.exps.syn_utils import get_frontend
from paddlespeech.t2s.exps.syn_utils import get_sentences
from paddlespeech.t2s.exps.syn_utils import get_voc_inference
from paddlespeech.t2s.exps.syn_utils import voc_to_static


def evaluate(args):

    # Init body.
    with open(args.am_config) as f:
        am_config = CfgNode(yaml.safe_load(f))
    with open(args.voc_config) as f:
        voc_config = CfgNode(yaml.safe_load(f))

    print("========Args========")
    print(yaml.safe_dump(vars(args)))
    print("========Config========")
    print(am_config)
    print(voc_config)

    sentences = get_sentences(text_file=args.text, lang=args.lang)

    # frontend
    frontend = get_frontend(
        lang=args.lang,
        phones_dict=args.phones_dict,
        tones_dict=args.tones_dict)

    # acoustic model
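    # args.am is named "{model}_{dataset}", e.g. "fastspeech2_csmsc": split it
    # into the model name and the dataset it was trained on.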
    am_name = args.am[:args.am.rindex('_')]
    am_dataset = args.am[args.am.rindex('_') + 1:]

    am_inference = get_am_inference(
        am=args.am,
        am_config=am_config,
        am_ckpt=args.am_ckpt,
        am_stat=args.am_stat,
        phones_dict=args.phones_dict,
        tones_dict=args.tones_dict,
        speaker_dict=args.speaker_dict)

    # vocoder
    voc_inference = get_voc_inference(
        voc=args.voc,
        voc_config=voc_config,
        voc_ckpt=args.voc_ckpt,
        voc_stat=args.voc_stat)

    # optionally convert the dygraph models to static graph for export/inference
    if args.inference_dir:
        # acoustic model
        am_inference = am_to_static(
            am_inference=am_inference,
            am=args.am,
            inference_dir=args.inference_dir,
            speaker_dict=args.speaker_dict)

        # vocoder
        voc_inference = voc_to_static(
            voc_inference=voc_inference,
            voc=args.voc,
            inference_dir=args.inference_dir)

    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    merge_sentences = False
    # When tacotron2_ljspeech is converted from dygraph to static graph it may fail to
    # stop at the end of a sub-sentence (and sometimes not even at the end of the text),
    # so merge all sub-sentences and synthesize them in one pass (NOTE by yuantian01, Feb 9 2022)
    if am_name == 'tacotron2':
        merge_sentences = True

    get_tone_ids = False
    if am_name == 'speedyspeech':
        get_tone_ids = True

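    # Accumulate generated samples (N) and synthesis time (T) to report overall
    # generation speed and real-time factor (RTF) at the end.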
    N = 0
    T = 0
    for utt_id, sentence in sentences:
        with timer() as t:
            if args.lang == 'zh':
                input_ids = frontend.get_input_ids(
                    sentence,
                    merge_sentences=merge_sentences,
                    get_tone_ids=get_tone_ids)
                phone_ids = input_ids["phone_ids"]
                if get_tone_ids:
                    tone_ids = input_ids["tone_ids"]
            elif args.lang == 'en':
                input_ids = frontend.get_input_ids(
                    sentence, merge_sentences=merge_sentences)
                phone_ids = input_ids["phone_ids"]
            elif args.lang == 'mix':
                input_ids = frontend.get_input_ids(
                    sentence, merge_sentences=merge_sentences)
                phone_ids = input_ids["phone_ids"]
            else:
                print("lang should in {'zh', 'en', 'mix'}!")
            with paddle.no_grad():
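                # flags marks whether wav_all already holds the first synthesized
                # chunk; later chunks are concatenated onto it.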
                flags = 0
                for i in range(len(phone_ids)):
                    part_phone_ids = phone_ids[i]
                    # acoustic model
                    if am_name == 'fastspeech2':
                        # multi speaker
                        if am_dataset in {"aishell3", "vctk", "mix"}:
                            spk_id = paddle.to_tensor(args.spk_id)
                            mel = am_inference(part_phone_ids, spk_id)
                        else:
                            mel = am_inference(part_phone_ids)
                    elif am_name == 'speedyspeech':
                        part_tone_ids = tone_ids[i]
                        if am_dataset in {"aishell3", "vctk"}:
                            spk_id = paddle.to_tensor(args.spk_id)
                            mel = am_inference(part_phone_ids, part_tone_ids,
                                               spk_id)
                        else:
                            mel = am_inference(part_phone_ids, part_tone_ids)
                    elif am_name == 'tacotron2':
                        mel = am_inference(part_phone_ids)
                    # vocoder
                    wav = voc_inference(mel)
                    if flags == 0:
                        wav_all = wav
                        flags = 1
                    else:
                        wav_all = paddle.concat([wav_all, wav])
        wav = wav_all.numpy()
        N += wav.size
        T += t.elapse
        speed = wav.size / t.elapse
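        # RTF = synthesis time / duration of generated audio (= fs / samples per second)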
        rtf = am_config.fs / speed
        print(
            f"{utt_id}, mel: {mel.shape}, wave: {wav.shape}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
        )
        sf.write(
            str(output_dir / (utt_id + ".wav")), wav, samplerate=am_config.fs)
        print(f"{utt_id} done!")
    print(f"generation speed: {N / T}Hz, RTF: {am_config.fs / (N / T) }")


def parse_args():
    # parse args and config
    parser = argparse.ArgumentParser(
        description="Synthesize with acoustic model & vocoder")
    # acoustic model
    parser.add_argument(
        '--am',
        type=str,
        default='fastspeech2_csmsc',
        choices=[
            'speedyspeech_csmsc', 'speedyspeech_aishell3', 'fastspeech2_csmsc',
            'fastspeech2_ljspeech', 'fastspeech2_aishell3', 'fastspeech2_vctk',
            'tacotron2_csmsc', 'tacotron2_ljspeech', 'fastspeech2_mix'
        ],
        help='Choose acoustic model type of tts task.')
    parser.add_argument(
        '--am_config', type=str, default=None, help='Config of acoustic model.')
    parser.add_argument(
        '--am_ckpt',
        type=str,
        default=None,
        help='Checkpoint file of acoustic model.')
    parser.add_argument(
        "--am_stat",
        type=str,
        default=None,
        help="mean and standard deviation used to normalize spectrogram when training acoustic model."
    )
    parser.add_argument(
        "--phones_dict", type=str, default=None, help="phone vocabulary file.")
    parser.add_argument(
        "--tones_dict", type=str, default=None, help="tone vocabulary file.")
    parser.add_argument(
        "--speaker_dict", type=str, default=None, help="speaker id map file.")
    parser.add_argument(
        '--spk_id',
        type=int,
        default=0,
        help='spk id for multi speaker acoustic model')
    # vocoder
    parser.add_argument(
        '--voc',
        type=str,
        default='pwgan_csmsc',
        choices=[
            'pwgan_csmsc',
            'pwgan_ljspeech',
            'pwgan_aishell3',
            'pwgan_vctk',
            'mb_melgan_csmsc',
            'style_melgan_csmsc',
            'hifigan_csmsc',
            'hifigan_ljspeech',
            'hifigan_aishell3',
            'hifigan_vctk',
            'wavernn_csmsc',
        ],
        help='Choose vocoder type of tts task.')
    parser.add_argument(
        '--voc_config', type=str, default=None, help='Config of voc.')
    parser.add_argument(
        '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.')
    parser.add_argument(
        "--voc_stat",
        type=str,
        default=None,
        help="mean and standard deviation used to normalize spectrogram when training voc."
    )
    # other
    parser.add_argument(
        '--lang',
        type=str,
        default='zh',
        help='Choose model language: zh, en, or mix.')

    parser.add_argument(
        "--inference_dir",
        type=str,
        default=None,
        help="dir to save inference models")
    parser.add_argument(
        "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
    parser.add_argument(
        "--text",
        type=str,
        help="text to synthesize, a 'utt_id sentence' pair per line.")
    parser.add_argument("--output_dir", type=str, help="output dir.")

    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    if args.ngpu == 0:
        paddle.set_device("cpu")
    elif args.ngpu > 0:
        paddle.set_device("gpu")
    else:
        print("ngpu should >= 0 !")

    evaluate(args)


if __name__ == "__main__":
    main()