# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
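#
# Example invocation (a minimal sketch; the config/checkpoint paths below are
# illustrative placeholders, not files shipped with this script):
#   python3 synthesize_e2e.py \
#       --am=fastspeech2_csmsc --am_config=am/default.yaml \
#       --am_ckpt=am/snapshot.pdz --am_stat=am/speech_stats.npy \
#       --phones_dict=am/phone_id_map.txt \
#       --voc=pwgan_csmsc --voc_config=voc/default.yaml \
#       --voc_ckpt=voc/snapshot.pdz --voc_stat=voc/feats_stats.npy \
#       --lang=zh --text=sentences.txt --output_dir=output --ngpu=1
# The --text file holds one "utt_id sentence" pair per line.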
import argparse
from pathlib import Path

import paddle
import soundfile as sf
import yaml
from timer import timer
from yacs.config import CfgNode

from paddlespeech.t2s.exps.syn_utils import am_to_static
from paddlespeech.t2s.exps.syn_utils import get_am_inference
from paddlespeech.t2s.exps.syn_utils import get_frontend
from paddlespeech.t2s.exps.syn_utils import get_sentences
from paddlespeech.t2s.exps.syn_utils import get_voc_inference
from paddlespeech.t2s.exps.syn_utils import run_frontend
from paddlespeech.t2s.exps.syn_utils import voc_to_static
from paddlespeech.t2s.utils import str2bool


def evaluate(args):
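    """Run end-to-end synthesis: text frontend -> acoustic model -> vocoder,
    writing one wav file per utterance to ``args.output_dir``."""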

    # Init body.
    with open(args.am_config) as f:
        am_config = CfgNode(yaml.safe_load(f))
    with open(args.voc_config) as f:
        voc_config = CfgNode(yaml.safe_load(f))

    print("========Args========")
    print(yaml.safe_dump(vars(args)))
    print("========Config========")
    print(am_config)
    print(voc_config)

    sentences = get_sentences(text_file=args.text, lang=args.lang)

    # frontend
    frontend = get_frontend(
        lang=args.lang,
        phones_dict=args.phones_dict,
        tones_dict=args.tones_dict,
        use_rhy=args.use_rhy)
    print("frontend done!")

    # acoustic model
    am_name = args.am[:args.am.rindex('_')]
    am_dataset = args.am[args.am.rindex('_') + 1:]

    am_inference = get_am_inference(
        am=args.am,
        am_config=am_config,
        am_ckpt=args.am_ckpt,
        am_stat=args.am_stat,
        phones_dict=args.phones_dict,
        tones_dict=args.tones_dict,
        speaker_dict=args.speaker_dict)
    print("acoustic model done!")
    # vocoder
    voc_inference = get_voc_inference(
        voc=args.voc,
        voc_config=voc_config,
        voc_ckpt=args.voc_ckpt,
        voc_stat=args.voc_stat)
    print("voc done!")

    # convert dygraph models to static graph if an inference dir is given
    if args.inference_dir:
        # acoustic model
        am_inference = am_to_static(
            am_inference=am_inference,
            am=args.am,
            inference_dir=args.inference_dir,
            speaker_dict=args.speaker_dict)
        # vocoder
        voc_inference = voc_to_static(
            voc_inference=voc_inference,
            voc=args.voc,
            inference_dir=args.inference_dir)

    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    merge_sentences = False
    # Merge sub-sentences for tacotron2: after tacotron2_ljspeech is converted from
    # dygraph to static graph it may not stop at the end of a sub-sentence, and can
    # still fail to stop at the very end (NOTE by yuantian01 Feb 9 2022).
    if am_name == 'tacotron2':
        merge_sentences = True

    # speedyspeech takes tone ids in addition to phone ids
    get_tone_ids = False
    if am_name == 'speedyspeech':
        get_tone_ids = True

    N = 0  # total number of generated audio samples
    T = 0  # total synthesis time in seconds
    for utt_id, sentence in sentences:
        with timer() as t:
            frontend_dict = run_frontend(
                frontend=frontend,
                text=sentence,
                merge_sentences=merge_sentences,
                get_tone_ids=get_tone_ids,
                lang=args.lang)
            phone_ids = frontend_dict['phone_ids']
            with paddle.no_grad():
                flags = 0  # becomes 1 once wav_all holds the first synthesized chunk
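                # synthesize each chunk of phone ids and concatenate the resulting audio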
                for i in range(len(phone_ids)):
                    part_phone_ids = phone_ids[i]
                    # acoustic model
                    if am_name == 'fastspeech2':
                        # multi speaker
                        if am_dataset in {"aishell3", "vctk", "mix", "canton"}:
                            spk_id = paddle.to_tensor(args.spk_id)
                            mel = am_inference(part_phone_ids, spk_id)
                        else:
                            mel = am_inference(part_phone_ids)
                    elif am_name == 'speedyspeech':
                        part_tone_ids = frontend_dict['tone_ids'][i]
                        if am_dataset in {"aishell3", "vctk", "mix"}:
                            spk_id = paddle.to_tensor(args.spk_id)
                            mel = am_inference(part_phone_ids, part_tone_ids,
                                               spk_id)
                        else:
                            mel = am_inference(part_phone_ids, part_tone_ids)
                    elif am_name == 'tacotron2':
                        mel = am_inference(part_phone_ids)
                    # vocoder
                    wav = voc_inference(mel)
                    if flags == 0:
                        wav_all = wav
                        flags = 1
                    else:
                        wav_all = paddle.concat([wav_all, wav])
        wav = wav_all.numpy()
        N += wav.size
        T += t.elapse
        speed = wav.size / t.elapse  # samples generated per second of wall time
        rtf = am_config.fs / speed  # real-time factor: synthesis time / audio duration
        print(
            f"{utt_id}, mel: {mel.shape}, wave: {wav.shape}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
        )
        sf.write(
            str(output_dir / (utt_id + ".wav")), wav, samplerate=am_config.fs)
        print(f"{utt_id} done!")
    print(f"generation speed: {N / T}Hz, RTF: {am_config.fs / (N / T) }")


def parse_args():
    # parse args and config
    parser = argparse.ArgumentParser(
        description="Synthesize with acoustic model & vocoder")
    # acoustic model
    parser.add_argument(
        '--am',
        type=str,
        default='fastspeech2_csmsc',
        choices=[
            'speedyspeech_csmsc', 'speedyspeech_aishell3', 'fastspeech2_csmsc',
            'fastspeech2_ljspeech', 'fastspeech2_aishell3', 'fastspeech2_vctk',
            'tacotron2_csmsc', 'tacotron2_ljspeech', 'fastspeech2_mix',
            'fastspeech2_canton'
        ],
        help='Choose acoustic model type of tts task.')
    parser.add_argument(
        '--am_config', type=str, default=None, help='Config of acoustic model.')
    parser.add_argument(
        '--am_ckpt',
        type=str,
        default=None,
        help='Checkpoint file of acoustic model.')
    parser.add_argument(
        "--am_stat",
        type=str,
        default=None,
        help="mean and standard deviation used to normalize spectrogram when training acoustic model."
    )
    parser.add_argument(
        "--phones_dict", type=str, default=None, help="phone vocabulary file.")
    parser.add_argument(
        "--tones_dict", type=str, default=None, help="tone vocabulary file.")
    parser.add_argument(
        "--speaker_dict", type=str, default=None, help="speaker id map file.")
    parser.add_argument(
        '--spk_id',
        type=int,
        default=0,
        help='spk id for multi speaker acoustic model')
    # vocoder
    parser.add_argument(
        '--voc',
        type=str,
        default='pwgan_csmsc',
        choices=[
            'pwgan_csmsc',
            'pwgan_ljspeech',
            'pwgan_aishell3',
            'pwgan_vctk',
            'mb_melgan_csmsc',
            'style_melgan_csmsc',
            'hifigan_csmsc',
            'hifigan_ljspeech',
            'hifigan_aishell3',
            'hifigan_vctk',
            'wavernn_csmsc',
        ],
        help='Choose vocoder type of tts task.')
    parser.add_argument(
        '--voc_config', type=str, default=None, help='Config of voc.')
    parser.add_argument(
        '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.')
    parser.add_argument(
        "--voc_stat",
        type=str,
        default=None,
        help="mean and standard deviation used to normalize spectrogram when training voc."
    )
    # other
    parser.add_argument(
        '--lang',
        type=str,
        default='zh',
        help='Choose model language: zh, en or mix.')

    parser.add_argument(
        "--inference_dir",
        type=str,
        default=None,
        help="dir to save inference models")
    parser.add_argument(
        "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
    parser.add_argument(
        "--text",
        type=str,
        help="text to synthesize, a 'utt_id sentence' pair per line.")
    parser.add_argument("--output_dir", type=str, help="output dir.")
    parser.add_argument(
        "--use_rhy",
        type=str2bool,
        default=False,
        help="run rhythm frontend or not")

    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    if args.ngpu == 0:
        paddle.set_device("cpu")
    elif args.ngpu > 0:
        paddle.set_device("gpu")
    else:
        print("ngpu should >= 0 !")

    evaluate(args)


if __name__ == "__main__":
    main()