import gradio as gr
import os
from paddlespeech.cli.asr.infer import ASRExecutor
from paddlespeech.cli.text.infer import TextExecutor
import librosa
import soundfile as sf

# Fetch a sample Mandarin clip at import time (-c resumes a partial download).
os.system("wget -c 'https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav'")
asr = ASRExecutor()
text_punc = TextExecutor()
# Warm-up pass: running one inference here forces model weights to be
# downloaded and loaded before the first user request reaches the UI handlers.
tmp_result = asr(audio_file='zh.wav',
                 model='conformer_online_wenetspeech',
                 device="cpu")
# Restore punctuation on the raw transcript with the text-punctuation model.
tmp_result = text_punc(
    text=tmp_result, model='ernie_linear_p7_wudao', device="cpu")


def model_inference(audio):
    """Transcribe an audio file with ASR, then restore punctuation.

    Args:
        audio: Path to an audio file (str), or a Gradio upload object
            exposing the temp-file path via its ``.name`` attribute.

    Returns:
        str: The punctuated transcript produced by the PaddleSpeech
        ASR + text-punctuation pipeline.
    """
    # Gradio's file-type Audio component hands back a tempfile wrapper;
    # unwrap it to a plain filesystem path.
    if not isinstance(audio, str):
        audio = str(audio.name)

    y, sr = sf.read(audio)
    # The ASR model expects 16 kHz input; resample and overwrite in place.
    if sr != 16000:
        # librosa >= 0.10 requires keyword sample-rate arguments; the old
        # positional form librosa.resample(y, sr, 16000) raises TypeError.
        y = librosa.resample(y, orig_sr=sr, target_sr=16000)
        sf.write(audio, y, 16000)
    result = asr(audio_file=audio,
                 model='conformer_online_wenetspeech',
                 device="cpu")
    result = text_punc(
        text=result, model='ernie_linear_p7_wudao', device="cpu")
    return result


def clear_all():
    """Reset both audio inputs and both transcript boxes to empty."""
    return (None,) * 4


# Build the two-column-of-widgets demo page: one file-upload path and one
# microphone path, each wired to the same inference function.
with gr.Blocks() as demo:
    gr.Markdown("ASR")

    with gr.Column(scale=1, min_width=100):
        # File-upload input, pre-populated with the hosted sample clip.
        audio_input = gr.Audio(
            value='https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav',
            type="file",
            label=" Input From File")
        # NOTE(review): gr.inputs.Audio is the deprecated pre-3.x Gradio
        # namespace; newer releases only provide gr.Audio(source=...).
        # Confirm the pinned Gradio version still exposes it.
        micro_input = gr.inputs.Audio(
            source="microphone", type='filepath', label="Input From Mic")

        with gr.Row():
            btn1 = gr.Button("Clear")
            btn2 = gr.Button("Submit File")
            btn3 = gr.Button("Submit Micro")

        audio_text_output = gr.Textbox(placeholder="Result...", lines=10)
        micro_text_output = gr.Textbox(placeholder="Micro Result...", lines=10)

    # Microphone recording -> transcript.
    btn3.click(
        fn=model_inference,
        inputs=[micro_input],
        outputs=micro_text_output,
        scroll_to_output=True)
    # Uploaded file -> transcript.
    btn2.click(
        fn=model_inference,
        inputs=[audio_input],
        outputs=audio_text_output,
        scroll_to_output=True)
    # Clear resets both inputs and both output boxes (see clear_all).
    btn1.click(
        fn=clear_all,
        inputs=None,
        outputs=[
            audio_input, micro_input, audio_text_output, micro_text_output
        ])

    # NOTE(review): this calls .style on the Button *class*, not on a button
    # instance — it looks like a bug and likely raises at import time on most
    # Gradio versions; verify intent (probably meant btnX.style(...)).
    gr.Button.style(1)

# Start the Gradio server (blocking call).
demo.launch()