diff --git a/README_cn.md b/README_cn.md
index c751b061dcbbfd4ee8c66bf494e006028a0a9ae1..66ba3c0ec4b026ed64a82a23a8ba7d9d2670a777 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -159,6 +159,7 @@
 ### Recent Updates
+- 👑 2022.05.13: PaddleSpeech releases [PP-ASR](./docs/source/asr/PPASR_cn.md), a streaming speech recognition system; [PP-TTS](./docs/source/tts/PPTTS_cn.md), a streaming speech synthesis system; and [PP-VPR](docs/source/vpr/PPVPR_cn.md), a full-pipeline speaker verification system
 - 👏🏻 2022.05.06: PaddleSpeech Streaming Server is now available! It covers speech recognition (with punctuation restoration and timestamps) and speech synthesis.
 - 👏🏻 2022.05.06: PaddleSpeech Server is now available! It covers audio classification, speech recognition, speech synthesis, speaker verification, and punctuation restoration.
diff --git a/demos/speech_web/.gitignore b/demos/speech_web/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..54418e6053342b672a3b7febb8183d331c1bee05
--- /dev/null
+++ b/demos/speech_web/.gitignore
@@ -0,0 +1,16 @@
+*/.vscode/*
+*.wav
+*/resource/*
+.DS_Store
+*.pyc
+*.pcm
+*.npy
+*.diff
+*.sqlite
+*/static/*
+*.pdparams
+*.pdiparams*
+*.pdmodel
+*/source/*
+*/PaddleSpeech/*
+
diff --git a/demos/speech_web/README_cn.md b/demos/speech_web/README_cn.md
new file mode 100644
index 0000000000000000000000000000000000000000..0de4ed5122b217235e0018ae88d38f8c65b48176
--- /dev/null
+++ b/demos/speech_web/README_cn.md
@@ -0,0 +1,168 @@
+# Paddle Speech Demo
+
+PaddleSpeechDemo is a demo project built around the speech-interaction capabilities of PaddleSpeech. It is meant to help you get started with PaddleSpeech and build your own applications on top of it.
+
+The speech-interaction part uses PaddleSpeech, the dialogue and information-extraction parts use PaddleNLP, and the web front end is built with Vue3.
+
+Main features:
+
++ Voice chat: PaddleSpeech speech recognition plus speech synthesis, with the dialogue handled by PaddleNLP's chit-chat module
++ Speaker verification: a demo of PaddleSpeech's speaker verification capability
++ Speech recognition: supports three modes: streaming recognition, end-to-end recognition, and audio-file recognition
++ Speech synthesis: supports two modes: streaming synthesis and end-to-end synthesis
++ Voice commands: smart reimbursement of transportation expenses, built on PaddleSpeech recognition and PaddleNLP information extraction
+
+Screenshot:
+
+ ![demo](docs/效果展示.png)
+
+## Installation
+
+### Backend setup
+
+```
+# install the dependencies
+cd speech_server
+pip install -r requirements.txt
+```
+
+
+### Frontend setup
+
+The front end depends on node.js; install it first and make sure npm is available (tested with npm 8.3.1). The stable node.js build from the [official site](https://nodejs.org/en/) is recommended.
+
+```
+# enter the frontend directory
+cd web_client
+
+# install yarn (skip if it is already installed)
+npm install -g yarn
+
+# install the frontend dependencies with yarn
+yarn install
+```
+
+
+## Starting the services
+
+### Start the backend
+
+```
+cd speech_server
+# port 8010 by default
+python main.py --port 8010
+```
+
+### Start the frontend
+
+```
+cd web_client
+yarn dev --port 8011
+```
+
+With the default configuration, the backend address configured in the front end is localhost, so the backend server and the browser opening the page must run on the same machine. If they do not, see the FAQ entry below: "How do I change the backend host or port?".
+
+## Running with Docker
+
+### Backend docker
+The backend uses the [official paddlepaddle docker image](https://www.paddlepaddle.org.cn); the CPU version is shown here.
+```
+# clone the PaddleSpeech repo
+cd speech_server
+git clone https://github.com/PaddlePaddle/PaddleSpeech.git
+
+# pull the image
+docker pull registry.baidubce.com/paddlepaddle/paddle:2.3.0
+
+# start the container
+docker run --name paddle -it -p 8010:8010 -v $PWD:/paddle registry.baidubce.com/paddlepaddle/paddle:2.3.0 /bin/bash
+
+# inside the container
+cd /paddle
+
+# install the dependencies
+pip install -r requirements.txt
+
+# start the service
+python main.py --port 8010
+
+```
+
+### Frontend docker
+
+The front end can use the [official node docker image](https://hub.docker.com/_/node) directly:
+
+```shell
+docker pull node
+```
+
+Install the dependencies inside the image:
+
+```shell
+cd web_client
+# map port 8011
+docker run -it -p 8011:8011 -v $PWD:/paddle node:latest /bin/bash
+# inside the container
+cd /paddle
+# install the dependencies
+yarn install
+# start the frontend
+yarn dev --port 8011
+```
+
+
+
+
+
+## FAQ
+
+#### Q: How do I install node.js?
+
+A: See for example this [tutorial](https://www.runoob.com/nodejs/nodejs-install-setup.html), and make sure npm is available afterwards.
+
+#### Q: How do I change the backend host or port?
+
+A: The backend address is configured in two separate files.
+
+First, edit `web_client/vite.config.js`:
+
+```json
+server: {
+  host: "0.0.0.0",
+  proxy: {
+    "/api": {
+      target: "http://localhost:8010", // change this to the backend address
+      changeOrigin: true,
+      rewrite: (path) => path.replace(/^\/api/, ""),
+    },
+  },
+}
+```
+
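+For example, with the backend deployed on another machine (the address below is hypothetical), only `target` needs to change:
+
+```json
+"/api": {
+  target: "http://192.168.1.10:8010", // hypothetical backend host:port
+  changeOrigin: true,
+  rewrite: (path) => path.replace(/^\/api/, ""),
+},
+```
+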
+Second, edit `web_client/src/api/API.js` (the websocket endpoints cannot go through the vite proxy, so they have to be changed here as well):
+
+```javascript
+// websocket endpoints (change these to the backend address)
+CHAT_SOCKET_RECORD: 'ws://localhost:8010/ws/asr/offlineStream', // ChatBot websocket endpoint
+ASR_SOCKET_RECORD: 'ws://localhost:8010/ws/asr/onlineStream', // streaming ASR endpoint
+TTS_SOCKET_RECORD: 'ws://localhost:8010/ws/tts/online', // streaming TTS endpoint
+```
+
+#### Q: The front end cannot record when the backend is addressed by IP
+
+A: This is mainly a browser security restriction (getUserMedia is only allowed on secure origins); the browser has to be reconfigured and restarted. See [使用js-audio-recorder报浏览器不支持getUserMedia](https://blog.csdn.net/YRY_LIKE_YOU/article/details/113745273).
+
+Chrome setting: chrome://flags/#unsafely-treat-insecure-origin-as-secure
+
+
+
+
+## References
+
+Recording in Vue: https://blog.csdn.net/qq_41619796/article/details/107865602#t1
+
+Reference repos for streaming audio playback in the front end:
+
+https://github.com/AnthumChris/fetch-stream-audio
+
+https://bm.enthuses.me/buffered.php?bref=6677
diff --git "a/demos/speech_web/docs/\346\225\210\346\236\234\345\261\225\347\244\272.png" "b/demos/speech_web/docs/\346\225\210\346\236\234\345\261\225\347\244\272.png"
new file mode 100644
index 0000000000000000000000000000000000000000..5f7997c173a685ff546664925a839527bd639d49
Binary files /dev/null and "b/demos/speech_web/docs/\346\225\210\346\236\234\345\261\225\347\244\272.png" differ
diff --git a/demos/speech_web/speech_server/conf/tts_online_application.yaml b/demos/speech_web/speech_server/conf/tts_online_application.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0460a5e164ab526ee42a948f8741f04b463d692e
--- /dev/null
+++ b/demos/speech_web/speech_server/conf/tts_online_application.yaml
@@ -0,0 +1,103 @@
+# This is the parameter configuration file for the streaming tts server.
+
+#################################################################################
+#                             SERVER SETTING                                    #
+#################################################################################
+host: 0.0.0.0
+port: 8092
+
+# The task format in the engine_list is: <speech task>_<engine type>
+# engine_list choices = ['tts_online', 'tts_online-onnx']; tts_online-onnx infers faster than tts_online.
+# protocol choices = ['websocket', 'http']
+protocol: 'http'
+engine_list: ['tts_online-onnx']
+
+
+#################################################################################
+#                                ENGINE CONFIG                                  #
+#################################################################################
+
+################################### TTS #########################################
+################### speech task: tts; engine_type: online #######################
+tts_online:
+    # am (acoustic model) choices=['fastspeech2_csmsc', 'fastspeech2_cnndecoder_csmsc']
+    # only fastspeech2_cnndecoder_csmsc supports streaming am inference.
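+    # Note: with plain fastspeech2_csmsc below, the acoustic model runs once per
+    # sentence and only the vocoder streams; switch to fastspeech2_cnndecoder_csmsc
+    # if the acoustic model itself should stream as well.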
+    am: 'fastspeech2_csmsc'
+    am_config:
+    am_ckpt:
+    am_stat:
+    phones_dict:
+    tones_dict:
+    speaker_dict:
+    spk_id: 0
+
+    # voc (vocoder) choices=['mb_melgan_csmsc', 'hifigan_csmsc']
+    # both mb_melgan_csmsc and hifigan_csmsc support streaming voc inference
+    voc: 'mb_melgan_csmsc'
+    voc_config:
+    voc_ckpt:
+    voc_stat:
+
+    # others
+    lang: 'zh'
+    device: 'cpu' # set 'gpu:id' or 'cpu'
+    # am_block and am_pad are only used by the fastspeech2_cnndecoder_csmsc model for streaming am inference;
+    # with am_pad = 12, the streaming synthetic audio is identical to the non-streaming synthetic audio
+    am_block: 72
+    am_pad: 12
+    # voc_block and voc_pad are used by the voc model for streaming voc inference;
+    # with mb_melgan_csmsc, voc_pad = 14 makes the streaming synthetic audio identical to the non-streaming audio; a pad as low as 7 still sounds normal
+    # with hifigan_csmsc, voc_pad = 19 makes the streaming synthetic audio identical to the non-streaming audio; voc_pad = 14 still sounds normal
+    voc_block: 36
+    voc_pad: 14
+
+
+
+#################################################################################
+#                                ENGINE CONFIG                                  #
+#################################################################################
+
+################################### TTS #########################################
+################### speech task: tts; engine_type: online-onnx ##################
+tts_online-onnx:
+    # am (acoustic model) choices=['fastspeech2_csmsc_onnx', 'fastspeech2_cnndecoder_csmsc_onnx']
+    # only fastspeech2_cnndecoder_csmsc_onnx supports streaming am inference.
+    am: 'fastspeech2_cnndecoder_csmsc_onnx'
+    # am_ckpt is a list: if am is fastspeech2_cnndecoder_csmsc_onnx, am_ckpt = [encoder model, decoder model, postnet model];
+    # if am is fastspeech2_csmsc_onnx, am_ckpt = [ckpt model]
+    am_ckpt: # list
+    am_stat:
+    phones_dict:
+    tones_dict:
+    speaker_dict:
+    spk_id: 0
+    am_sample_rate: 24000
+    am_sess_conf:
+        device: "cpu" # set 'gpu:id' or 'cpu'
+        use_trt: False
+        cpu_threads: 4
+
+    # voc (vocoder) choices=['mb_melgan_csmsc_onnx', 'hifigan_csmsc_onnx']
+    # both mb_melgan_csmsc_onnx and hifigan_csmsc_onnx support streaming voc inference
+    voc: 'hifigan_csmsc_onnx'
+    voc_ckpt:
+    voc_sample_rate: 24000
+    voc_sess_conf:
+        device: "cpu" # set 'gpu:id' or 'cpu'
+        use_trt: False
+        cpu_threads: 4
+
+    # others
+    lang: 'zh'
+    # am_block and am_pad are only used by the fastspeech2_cnndecoder_csmsc_onnx model for streaming am inference;
+    # with am_pad = 12, the streaming synthetic audio is identical to the non-streaming synthetic audio
+    am_block: 72
+    am_pad: 12
+    # voc_block and voc_pad are used by the voc model for streaming voc inference;
+    # with mb_melgan_csmsc_onnx, voc_pad = 14 makes the streaming synthetic audio identical to the non-streaming audio; a pad as low as 7 still sounds normal
+    # with hifigan_csmsc_onnx, voc_pad = 19 makes the streaming synthetic audio identical to the non-streaming audio; voc_pad = 14 still sounds normal
+    voc_block: 36
+    voc_pad: 14
+    # voc_upsample should be the same as n_shift in the voc config.
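+    # (e.g. assuming the default csmsc vocoder configs, which use a hop size
+    # n_shift = 300 samples at 24 kHz, the value below is 300)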
+    voc_upsample: 300
+
diff --git a/paddlespeech/server/conf/ws_application.yaml b/demos/speech_web/speech_server/conf/ws_conformer_wenetspeech_application_faster.yaml
similarity index 81%
rename from paddlespeech/server/conf/ws_application.yaml
rename to demos/speech_web/speech_server/conf/ws_conformer_wenetspeech_application_faster.yaml
index 43d83f2d46bac910f018bb2b6270b25a84944480..ba413c8023820c90a3d42cf84b5cc7278b67b768 100644
--- a/paddlespeech/server/conf/ws_application.yaml
+++ b/demos/speech_web/speech_server/conf/ws_conformer_wenetspeech_application_faster.yaml
@@ -7,8 +7,8 @@ host: 0.0.0.0
 port: 8090
 
 # The task format in the engine_list is: <speech task>_<engine type>
-# task choices = ['asr_online', 'tts_online']
-# protocol = ['websocket', 'http'] (only one can be selected).
+# task choices = ['asr_online']
+# protocol = ['websocket'] (only one can be selected).
 # websocket only supports the online engine type.
 protocol: 'websocket'
 engine_list: ['asr_online']
@@ -21,17 +21,18 @@ engine_list: ['asr_online']
 ################################### ASR #########################################
 ################### speech task: asr; engine_type: online #######################
 asr_online:
-    model_type: 'deepspeech2online_aishell'
+    model_type: 'conformer_online_wenetspeech'
     am_model: # the pdmodel file of am static model [optional]
     am_params: # the pdiparams file of am static model [optional]
     lang: 'zh'
     sample_rate: 16000
     cfg_path:
     decode_method:
-    num_decoding_left_chunks:
     force_yes: True
-    device: # cpu or gpu:id
-
+    device: 'cpu' # cpu or gpu:id
+    decode_method: "attention_rescoring"
+    continuous_decoding: True # enable continuous decoding when an endpoint is detected
+    num_decoding_left_chunks: 16
     am_predictor_conf:
         device: # set 'gpu:id' or 'cpu'
         switch_ir_optim: True
@@ -39,11 +40,9 @@ asr_online:
         summary: True # False -> do not show predictor config
 
     chunk_buffer_conf:
-        frame_duration_ms: 80
-        shift_ms: 40
-        sample_rate: 16000
-        sample_width: 2
         window_n: 7 # frame
         shift_n: 4 # frame
-        window_ms: 20 # ms
+        window_ms: 25 # ms
         shift_ms: 10 # ms
+        sample_rate: 16000
+        sample_width: 2
diff --git a/demos/speech_web/speech_server/main.py b/demos/speech_web/speech_server/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..021f1e16b7b218064cfb94bb8d9c5b29c533a51e
--- /dev/null
+++ b/demos/speech_web/speech_server/main.py
@@ -0,0 +1,492 @@
+# todo:
+# 1. 开启服务
+# 2. 接收录音音频,返回识别结果
+# 3. 接收ASR识别结果,返回NLP对话结果
+# 4.
接收NLP对话结果,返回TTS音频 + +import base64 +import yaml +import os +import json +import datetime +import librosa +import soundfile as sf +import numpy as np +import argparse +import uvicorn +import aiofiles +from typing import Optional, List +from pydantic import BaseModel +from fastapi import FastAPI, Header, File, UploadFile, Form, Cookie, WebSocket, WebSocketDisconnect +from fastapi.responses import StreamingResponse +from starlette.responses import FileResponse +from starlette.middleware.cors import CORSMiddleware +from starlette.requests import Request +from starlette.websockets import WebSocketState as WebSocketState + +from src.AudioManeger import AudioMannger +from src.util import * +from src.robot import Robot +from src.WebsocketManeger import ConnectionManager +from src.SpeechBase.vpr import VPR + +from paddlespeech.server.engine.asr.online.asr_engine import PaddleASRConnectionHanddler +from paddlespeech.server.utils.audio_process import float2pcm + + +# 解析配置 +parser = argparse.ArgumentParser( + prog='PaddleSpeechDemo', add_help=True) + +parser.add_argument( + "--port", + action="store", + type=int, + help="port of the app", + default=8010, + required=False) + +args = parser.parse_args() +port = args.port + +# 配置文件 +tts_config = "conf/tts_online_application.yaml" +asr_config = "conf/ws_conformer_wenetspeech_application_faster.yaml" +asr_init_path = "source/demo/demo.wav" +db_path = "source/db/vpr.sqlite" +ie_model_path = "source/model" + +# 路径配置 +UPLOAD_PATH = "source/vpr" +WAV_PATH = "source/wav" + + +base_sources = [ + UPLOAD_PATH, WAV_PATH +] +for path in base_sources: + os.makedirs(path, exist_ok=True) + + +# 初始化 +app = FastAPI() +chatbot = Robot(asr_config, tts_config, asr_init_path, ie_model_path=ie_model_path) +manager = ConnectionManager() +aumanager = AudioMannger(chatbot) +aumanager.init() +vpr = VPR(db_path, dim = 192, top_k = 5) + +# 服务配置 +class NlpBase(BaseModel): + chat: str + +class TtsBase(BaseModel): + text: str + +class Audios: + def __init__(self) -> None: + self.audios = b"" + +audios = Audios() + +###################################################################### +########################### ASR 服务 ################################# +##################################################################### + +# 接收文件,返回ASR结果 +# 上传文件 +@app.post("/asr/offline") +async def speech2textOffline(files: List[UploadFile]): + # 只有第一个有效 + asr_res = "" + for file in files[:1]: + # 生成时间戳 + now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + out_file_path = os.path.join(WAV_PATH, now_name) + async with aiofiles.open(out_file_path, 'wb') as out_file: + content = await file.read() # async read + await out_file.write(content) # async write + + # 返回ASR识别结果 + asr_res = chatbot.speech2text(out_file_path) + return SuccessRequest(result=asr_res) + # else: + # return ErrorRequest(message="文件不是.wav格式") + return ErrorRequest(message="上传文件为空") + +# 接收文件,同时将wav强制转成16k, int16类型 +@app.post("/asr/offlinefile") +async def speech2textOfflineFile(files: List[UploadFile]): + # 只有第一个有效 + asr_res = "" + for file in files[:1]: + # 生成时间戳 + now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + out_file_path = os.path.join(WAV_PATH, now_name) + async with aiofiles.open(out_file_path, 'wb') as out_file: + content = await file.read() # async read + await out_file.write(content) # async write + + # 将文件转成16k, 16bit类型的wav文件 + wav, sr = librosa.load(out_file_path, sr=16000) + wav = 
float2pcm(wav) # float32 to int16 + wav_bytes = wav.tobytes() # to bytes + wav_base64 = base64.b64encode(wav_bytes).decode('utf8') + + # 将文件重新写入 + now_name = now_name[:-4] + "_16k" + ".wav" + out_file_path = os.path.join(WAV_PATH, now_name) + sf.write(out_file_path,wav,16000) + + # 返回ASR识别结果 + asr_res = chatbot.speech2text(out_file_path) + response_res = { + "asr_result": asr_res, + "wav_base64": wav_base64 + } + return SuccessRequest(result=response_res) + + return ErrorRequest(message="上传文件为空") + + + +# 流式接收测试 +@app.post("/asr/online1") +async def speech2textOnlineRecive(files: List[UploadFile]): + audio_bin = b'' + for file in files: + content = await file.read() + audio_bin += content + audios.audios += audio_bin + print(f"audios长度变化: {len(audios.audios)}") + return SuccessRequest(message="接收成功") + +# 采集环境噪音大小 +@app.post("/asr/collectEnv") +async def collectEnv(files: List[UploadFile]): + for file in files[:1]: + content = await file.read() # async read + # 初始化, wav 前44字节是头部信息 + aumanager.compute_env_volume(content[44:]) + vad_ = aumanager.vad_threshold + return SuccessRequest(result=vad_,message="采集环境噪音成功") + +# 停止录音 +@app.get("/asr/stopRecord") +async def stopRecord(): + audios.audios = b"" + aumanager.stop() + print("Online录音暂停") + return SuccessRequest(message="停止成功") + +# 恢复录音 +@app.get("/asr/resumeRecord") +async def resumeRecord(): + aumanager.resume() + print("Online录音恢复") + return SuccessRequest(message="Online录音恢复") + + +# 聊天用的ASR +@app.websocket("/ws/asr/offlineStream") +async def websocket_endpoint(websocket: WebSocket): + await manager.connect(websocket) + try: + while True: + asr_res = None + # websocket 不接收,只推送 + data = await websocket.receive_bytes() + if not aumanager.is_pause: + asr_res = aumanager.stream_asr(data) + else: + print("录音暂停") + if asr_res: + await manager.send_personal_message(asr_res, websocket) + aumanager.clear_asr() + + except WebSocketDisconnect: + manager.disconnect(websocket) + # await manager.broadcast(f"用户-{user}-离开") + # print(f"用户-{user}-离开") + + +# Online识别的ASR +@app.websocket('/ws/asr/onlineStream') +async def websocket_endpoint(websocket: WebSocket): + """PaddleSpeech Online ASR Server api + + Args: + websocket (WebSocket): the websocket instance + """ + + #1. the interface wait to accept the websocket protocal header + # and only we receive the header, it establish the connection with specific thread + await websocket.accept() + + #2. if we accept the websocket headers, we will get the online asr engine instance + engine = chatbot.asr.engine + + #3. each websocket connection, we will create an PaddleASRConnectionHanddler to process such audio + # and each connection has its own connection instance to process the request + # and only if client send the start signal, we create the PaddleASRConnectionHanddler instance + connection_handler = None + + try: + #4. 
we do a loop to process the audio package by package according the protocal + # and only if the client send finished signal, we will break the loop + while True: + # careful here, changed the source code from starlette.websockets + # 4.1 we wait for the client signal for the specific action + assert websocket.application_state == WebSocketState.CONNECTED + message = await websocket.receive() + websocket._raise_on_disconnect(message) + + #4.2 text for the action command and bytes for pcm data + if "text" in message: + # we first parse the specific command + message = json.loads(message["text"]) + if 'signal' not in message: + resp = {"status": "ok", "message": "no valid json data"} + await websocket.send_json(resp) + + # start command, we create the PaddleASRConnectionHanddler instance to process the audio data + # end command, we process the all the last audio pcm and return the final result + # and we break the loop + if message['signal'] == 'start': + resp = {"status": "ok", "signal": "server_ready"} + # do something at begining here + # create the instance to process the audio + # connection_handler = chatbot.asr.connection_handler + connection_handler = PaddleASRConnectionHanddler(engine) + await websocket.send_json(resp) + elif message['signal'] == 'end': + # reset single engine for an new connection + # and we will destroy the connection + connection_handler.decode(is_finished=True) + connection_handler.rescoring() + asr_results = connection_handler.get_result() + connection_handler.reset() + + resp = { + "status": "ok", + "signal": "finished", + 'result': asr_results + } + await websocket.send_json(resp) + break + else: + resp = {"status": "ok", "message": "no valid json data"} + await websocket.send_json(resp) + elif "bytes" in message: + # bytes for the pcm data + message = message["bytes"] + print("###############") + print("len message: ", len(message)) + print("###############") + + # we extract the remained audio pcm + # and decode for the result in this package data + connection_handler.extract_feat(message) + connection_handler.decode(is_finished=False) + asr_results = connection_handler.get_result() + + # return the current period result + # if the engine create the vad instance, this connection will have many period results + resp = {'result': asr_results} + print(resp) + await websocket.send_json(resp) + except WebSocketDisconnect: + pass + +###################################################################### +########################### NLP 服务 ################################# +##################################################################### + +@app.post("/nlp/chat") +async def chatOffline(nlp_base:NlpBase): + chat = nlp_base.chat + if not chat: + return ErrorRequest(message="传入文本为空") + else: + res = chatbot.chat(chat) + return SuccessRequest(result=res) + +@app.post("/nlp/ie") +async def ieOffline(nlp_base:NlpBase): + nlp_text = nlp_base.chat + if not nlp_text: + return ErrorRequest(message="传入文本为空") + else: + res = chatbot.ie(nlp_text) + return SuccessRequest(result=res) + +###################################################################### +########################### TTS 服务 ################################# +##################################################################### + +@app.post("/tts/offline") +async def text2speechOffline(tts_base:TtsBase): + text = tts_base.text + if not text: + return ErrorRequest(message="文本为空") + else: + now_name = "tts_"+ datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + 
out_file_path = os.path.join(WAV_PATH, now_name) + # 保存为文件,再转成base64传输 + chatbot.text2speech(text, outpath=out_file_path) + with open(out_file_path, "rb") as f: + data_bin = f.read() + base_str = base64.b64encode(data_bin) + return SuccessRequest(result=base_str) + +# http流式TTS +@app.post("/tts/online") +async def stream_tts(request_body: TtsBase): + text = request_body.text + return StreamingResponse(chatbot.text2speechStreamBytes(text=text)) + +# ws流式TTS +@app.websocket("/ws/tts/online") +async def stream_ttsWS(websocket: WebSocket): + await manager.connect(websocket) + try: + while True: + text = await websocket.receive_text() + # 用 websocket 流式接收音频数据 + if text: + for sub_wav in chatbot.text2speechStream(text=text): + # print("发送sub wav: ", len(sub_wav)) + res = { + "wav": sub_wav, + "done": False + } + await websocket.send_json(res) + + # 输送结束 + res = { + "wav": sub_wav, + "done": True + } + await websocket.send_json(res) + # manager.disconnect(websocket) + + except WebSocketDisconnect: + manager.disconnect(websocket) + + +###################################################################### +########################### VPR 服务 ################################# +##################################################################### + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"]) + + +@app.post('/vpr/enroll') +async def vpr_enroll(table_name: str=None, + spk_id: str=Form(...), + audio: UploadFile=File(...)): + # Enroll the uploaded audio with spk-id into MySQL + try: + if not spk_id: + return {'status': False, 'msg': "spk_id can not be None"} + # Save the upload data to server. + content = await audio.read() + now_name = "vpr_enroll_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + audio_path = os.path.join(UPLOAD_PATH, now_name) + + with open(audio_path, "wb+") as f: + f.write(content) + vpr.vpr_enroll(username=spk_id, wav_path=audio_path) + return {'status': True, 'msg': "Successfully enroll data!"} + except Exception as e: + return {'status': False, 'msg': e} + + +@app.post('/vpr/recog') +async def vpr_recog(request: Request, + table_name: str=None, + audio: UploadFile=File(...)): + # Voice print recognition online + # try: + # Save the upload data to server. 
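+    # The flow below mirrors /vpr/enroll: persist the upload, embed it with the
+    # vector model, then query the faiss index; scores are inner products, so a
+    # larger score means more similar (hence the descending sort below).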
+ content = await audio.read() + now_name = "vpr_query_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + query_audio_path = os.path.join(UPLOAD_PATH, now_name) + with open(query_audio_path, "wb+") as f: + f.write(content) + spk_ids, paths, scores = vpr.do_search_vpr(query_audio_path) + + res = dict(zip(spk_ids, zip(paths, scores))) + # Sort results by distance metric, closest distances first + res = sorted(res.items(), key=lambda item: item[1][1], reverse=True) + return res + # except Exception as e: + # return {'status': False, 'msg': e}, 400 + + +@app.post('/vpr/del') +async def vpr_del(spk_id: dict=None): + # Delete a record by spk_id in MySQL + try: + spk_id = spk_id['spk_id'] + if not spk_id: + return {'status': False, 'msg': "spk_id can not be None"} + vpr.vpr_del(username=spk_id) + return {'status': True, 'msg': "Successfully delete data!"} + except Exception as e: + return {'status': False, 'msg': e}, 400 + + +@app.get('/vpr/list') +async def vpr_list(): + # Get all records in MySQL + try: + spk_ids, vpr_ids = vpr.do_list() + return spk_ids, vpr_ids + except Exception as e: + return {'status': False, 'msg': e}, 400 + + +@app.get('/vpr/database64') +async def vpr_database64(vprId: int): + # Get the audio file from path by spk_id in MySQL + try: + if not vprId: + return {'status': False, 'msg': "vpr_id can not be None"} + audio_path = vpr.do_get_wav(vprId) + # 返回base64 + + # 将文件转成16k, 16bit类型的wav文件 + wav, sr = librosa.load(audio_path, sr=16000) + wav = float2pcm(wav) # float32 to int16 + wav_bytes = wav.tobytes() # to bytes + wav_base64 = base64.b64encode(wav_bytes).decode('utf8') + + return SuccessRequest(result=wav_base64) + except Exception as e: + return {'status': False, 'msg': e}, 400 + +@app.get('/vpr/data') +async def vpr_data(vprId: int): + # Get the audio file from path by spk_id in MySQL + try: + if not vprId: + return {'status': False, 'msg': "vpr_id can not be None"} + audio_path = vpr.do_get_wav(vprId) + return FileResponse(audio_path) + except Exception as e: + return {'status': False, 'msg': e}, 400 + +if __name__ == '__main__': + uvicorn.run(app=app, host='0.0.0.0', port=port) + + + + + + diff --git a/demos/speech_web/speech_server/requirements.txt b/demos/speech_web/speech_server/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7e7bd16803914f5f242740362e6fe2eeb97c252c --- /dev/null +++ b/demos/speech_web/speech_server/requirements.txt @@ -0,0 +1,14 @@ +aiofiles +fastapi +librosa +numpy +pydantic +scikit_learn +SoundFile +starlette +uvicorn +paddlepaddle +paddlespeech +paddlenlp +faiss-cpu +python-multipart \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/AudioManeger.py b/demos/speech_web/speech_server/src/AudioManeger.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf1296a37c310d83ab18702300fa8fe82534b1f --- /dev/null +++ b/demos/speech_web/speech_server/src/AudioManeger.py @@ -0,0 +1,173 @@ +import imp +from queue import Queue +import numpy as np +import os +import wave +import random +import datetime +from .util import randName + + +class AudioMannger: + def __init__(self, robot, frame_length=160, frame=10, data_width=2, vad_default = 300): + # 二进制 pcm 流 + self.audios = b'' + self.asr_result = "" + # Speech 核心主体 + self.robot = robot + + self.file_dir = "source" + os.makedirs(self.file_dir, exist_ok=True) + self.vad_deafult = vad_default + self.vad_threshold = vad_default + self.vad_threshold_path = os.path.join(self.file_dir, 
"vad_threshold.npy") + + # 10ms 一帧 + self.frame_length = frame_length + # 10帧,检测一次 vad + self.frame = frame + # int 16, 两个bytes + self.data_width = data_width + # window + self.window_length = frame_length * frame * data_width + + # 是否开始录音 + self.on_asr = False + self.silence_cnt = 0 + self.max_silence_cnt = 4 + self.is_pause = False # 录音暂停与恢复 + + + + def init(self): + if os.path.exists(self.vad_threshold_path): + # 平均响度文件存在 + self.vad_threshold = np.load(self.vad_threshold_path) + + + def clear_audio(self): + # 清空 pcm 累积片段与 asr 识别结果 + self.audios = b'' + + def clear_asr(self): + self.asr_result = "" + + + def compute_chunk_volume(self, start_index, pcm_bins): + # 根据帧长计算能量平均值 + pcm_bin = pcm_bins[start_index: start_index + self.window_length] + # 转成 numpy + pcm_np = np.frombuffer(pcm_bin, np.int16) + # 归一化 + 计算响度 + x = pcm_np.astype(np.float32) + x = np.abs(x) + return np.mean(x) + + + def is_speech(self, start_index, pcm_bins): + # 检查是否没 + if start_index > len(pcm_bins): + return False + # 检查从这个 start 开始是否为静音帧 + energy = self.compute_chunk_volume(start_index=start_index, pcm_bins=pcm_bins) + # print(energy) + if energy > self.vad_threshold: + return True + else: + return False + + def compute_env_volume(self, pcm_bins): + max_energy = 0 + start = 0 + while start < len(pcm_bins): + energy = self.compute_chunk_volume(start_index=start, pcm_bins=pcm_bins) + if energy > max_energy: + max_energy = energy + start += self.window_length + self.vad_threshold = max_energy + 100 if max_energy > self.vad_deafult else self.vad_deafult + + # 保存成文件 + np.save(self.vad_threshold_path, self.vad_threshold) + print(f"vad 阈值大小: {self.vad_threshold}") + print(f"环境采样保存: {os.path.realpath(self.vad_threshold_path)}") + + def stream_asr(self, pcm_bin): + # 先把 pcm_bin 送进去做端点检测 + start = 0 + while start < len(pcm_bin): + if self.is_speech(start_index=start, pcm_bins=pcm_bin): + self.on_asr = True + self.silence_cnt = 0 + print("录音中") + self.audios += pcm_bin[ start : start + self.window_length] + else: + if self.on_asr: + self.silence_cnt += 1 + if self.silence_cnt > self.max_silence_cnt: + self.on_asr = False + self.silence_cnt = 0 + # 录音停止 + print("录音停止") + # audios 保存为 wav, 送入 ASR + if len(self.audios) > 2 * 16000: + file_path = os.path.join(self.file_dir, "asr_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav") + self.save_audio(file_path=file_path) + self.asr_result = self.robot.speech2text(file_path) + self.clear_audio() + return self.asr_result + else: + # 正常接收 + print("录音中 静音") + self.audios += pcm_bin[ start : start + self.window_length] + start += self.window_length + return "" + + def save_audio(self, file_path): + print("保存音频") + wf = wave.open(file_path, 'wb') # 创建一个音频文件,名字为“01.wav" + wf.setnchannels(1) # 设置声道数为2 + wf.setsampwidth(2) # 设置采样深度为 + wf.setframerate(16000) # 设置采样率为16000 + # 将数据写入创建的音频文件 + wf.writeframes(self.audios) + # 写完后将文件关闭 + wf.close() + + def end(self): + # audios 保存为 wav, 送入 ASR + file_path = os.path.join(self.file_dir, "asr.wav") + self.save_audio(file_path=file_path) + return self.robot.speech2text(file_path) + + def stop(self): + self.is_pause = True + self.audios = b'' + + def resume(self): + self.is_pause = False + + +if __name__ == '__main__': + from robot import Robot + + chatbot = Robot() + chatbot.init() + audio_manger = AudioMannger(chatbot) + + file_list = [ + "source/20220418145230qbenc.pcm", + ] + + for file in file_list: + with open(file, "rb") as f: + pcm_bin = f.read() + print(len(pcm_bin)) + asr_ = 
audio_manger.stream_asr(pcm_bin=pcm_bin) + print(asr_) + + print(audio_manger.end()) + + print(chatbot.speech2text("source/20220418145230zrxia.wav")) + + + \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/asr.py b/demos/speech_web/speech_server/src/SpeechBase/asr.py new file mode 100644 index 0000000000000000000000000000000000000000..4563d3ed83f10d59c4bb99db67552bfe2bd72d8d --- /dev/null +++ b/demos/speech_web/speech_server/src/SpeechBase/asr.py @@ -0,0 +1,87 @@ +from re import sub +import numpy as np +import paddle +import librosa +import soundfile + +from paddlespeech.server.engine.asr.online.asr_engine import ASREngine +from paddlespeech.server.engine.asr.online.asr_engine import PaddleASRConnectionHanddler +from paddlespeech.server.utils.config import get_config + +def readWave(samples): + x_len = len(samples) + + chunk_size = 85 * 16 #80ms, sample_rate = 16kHz + if x_len % chunk_size != 0: + padding_len_x = chunk_size - x_len % chunk_size + else: + padding_len_x = 0 + + padding = np.zeros((padding_len_x), dtype=samples.dtype) + padded_x = np.concatenate([samples, padding], axis=0) + + assert (x_len + padding_len_x) % chunk_size == 0 + num_chunk = (x_len + padding_len_x) / chunk_size + num_chunk = int(num_chunk) + for i in range(0, num_chunk): + start = i * chunk_size + end = start + chunk_size + x_chunk = padded_x[start:end] + yield x_chunk + + +class ASR: + def __init__(self, config_path, ) -> None: + self.config = get_config(config_path)['asr_online'] + self.engine = ASREngine() + self.engine.init(self.config) + self.connection_handler = PaddleASRConnectionHanddler(self.engine) + + def offlineASR(self, samples, sample_rate=16000): + x_chunk, x_chunk_lens = self.engine.preprocess(samples=samples, sample_rate=sample_rate) + self.engine.run(x_chunk, x_chunk_lens) + result = self.engine.postprocess() + self.engine.reset() + return result + + def onlineASR(self, samples:bytes=None, is_finished=False): + if not is_finished: + # 流式开始 + self.connection_handler.extract_feat(samples) + self.connection_handler.decode(is_finished) + asr_results = self.connection_handler.get_result() + return asr_results + else: + # 流式结束 + self.connection_handler.decode(is_finished=True) + self.connection_handler.rescoring() + asr_results = self.connection_handler.get_result() + self.connection_handler.reset() + return asr_results + + +if __name__ == '__main__': + config_path = r"../../PaddleSpeech/paddlespeech/server/conf/ws_conformer_application.yaml" + + wav_path = r"../../source/demo/demo_16k.wav" + samples, sample_rate = soundfile.read(wav_path, dtype='int16') + + asr = ASR(config_path=config_path) + end_result = asr.offlineASR(samples=samples, sample_rate=sample_rate) + print("端到端识别结果:", end_result) + + for sub_wav in readWave(samples=samples): + # print(sub_wav) + message = sub_wav.tobytes() + offline_result = asr.onlineASR(message, is_finished=False) + print("流式识别结果: ", offline_result) + offline_result = asr.onlineASR(is_finished=True) + print("流式识别结果: ", offline_result) + + + + + + + + \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/nlp.py b/demos/speech_web/speech_server/src/SpeechBase/nlp.py new file mode 100644 index 0000000000000000000000000000000000000000..3bf2c76441b6bdae6c1c3bedd39a847be102e38a --- /dev/null +++ b/demos/speech_web/speech_server/src/SpeechBase/nlp.py @@ -0,0 +1,28 @@ +from paddlenlp import Taskflow + +class NLP: + def __init__(self, ie_model_path=None): + schema = ["时间", "出发地", "目的地", "费用"] + if 
ie_model_path: + self.ie_model = Taskflow("information_extraction", + schema=schema, task_path=ie_model_path) + else: + self.ie_model = Taskflow("information_extraction", + schema=schema) + + self.dialogue_model = Taskflow("dialogue") + + def chat(self, text): + result = self.dialogue_model([text]) + return result[0] + + def ie(self, text): + result = self.ie_model(text) + return result + +if __name__ == '__main__': + ie_model_path = "../../source/model/" + nlp = NLP(ie_model_path=ie_model_path) + text = "今天早上我从大牛坊去百度科技园花了七百块钱" + print(nlp.ie(text)) + \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py b/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..c398fa2a61a8f6ff22f8e757daa7fc4cc10980e3 --- /dev/null +++ b/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py @@ -0,0 +1,152 @@ +import base64 +import sqlite3 +import os +import numpy as np +from pkg_resources import resource_stream + + +def dict_factory(cursor, row): + d = {} + for idx, col in enumerate(cursor.description): + d[col[0]] = row[idx] + return d + +class DataBase(object): + def __init__(self, db_path:str): + db_path = os.path.realpath(db_path) + + if os.path.exists(db_path): + self.db_path = db_path + else: + db_path_dir = os.path.dirname(db_path) + os.makedirs(db_path_dir, exist_ok=True) + self.db_path = db_path + + self.conn = sqlite3.connect(self.db_path) + self.conn.row_factory = dict_factory + self.cursor = self.conn.cursor() + self.init_database() + + def init_database(self): + """ + 初始化数据库, 若表不存在则创建 + """ + sql = """ + CREATE TABLE IF NOT EXISTS vprtable ( + `id` INTEGER PRIMARY KEY AUTOINCREMENT, + `username` TEXT NOT NULL, + `vector` TEXT NOT NULL, + `wavpath` TEXT NOT NULL + ); + """ + self.cursor.execute(sql) + self.conn.commit() + + def execute_base(self, sql, data_dict): + self.cursor.execute(sql, data_dict) + self.conn.commit() + + def insert_one(self, username, vector_base64:str, wav_path): + if not os.path.exists(wav_path): + return None, "wav not exists" + else: + sql = f""" + insert into + vprtable (username, vector, wavpath) + values (?, ?, ?) 
+ """ + try: + self.cursor.execute(sql, (username, vector_base64, wav_path)) + self.conn.commit() + lastidx = self.cursor.lastrowid + return lastidx, "data insert success" + except Exception as e: + print(e) + return None, e + + def select_all(self): + sql = """ + SELECT * from vprtable + """ + result = self.cursor.execute(sql).fetchall() + return result + + def select_by_id(self, vpr_id): + sql = f""" + SELECT * from vprtable WHERE `id` = {vpr_id} + """ + result = self.cursor.execute(sql).fetchall() + return result + + def select_by_username(self, username): + sql = f""" + SELECT * from vprtable WHERE `username` = '{username}' + """ + result = self.cursor.execute(sql).fetchall() + return result + + def drop_by_username(self, username): + sql = f""" + DELETE from vprtable WHERE `username`='{username}' + """ + self.cursor.execute(sql) + self.conn.commit() + + def drop_all(self): + sql = f""" + DELETE from vprtable + """ + self.cursor.execute(sql) + self.conn.commit() + + def drop_table(self): + sql = f""" + DROP TABLE vprtable + """ + self.cursor.execute(sql) + self.conn.commit() + + def encode_vector(self, vector:np.ndarray): + return base64.b64encode(vector).decode('utf8') + + def decode_vector(self, vector_base64, dtype=np.float32): + b = base64.b64decode(vector_base64) + vc = np.frombuffer(b, dtype=dtype) + return vc + +if __name__ == '__main__': + db_path = "../../source/db/vpr.sqlite" + db = DataBase(db_path) + + # 准备数据 + import numpy as np + vector = np.random.randn((192)).astype(np.float32).tobytes() + vector_base64 = base64.b64encode(vector).decode('utf8') + username = "sss" + wav_path = r"../../source/demo/demo_16k.wav" + + # 插入数据 + db.insert_one(username, vector_base64, wav_path) + + # 查询数据 + res_all = db.select_all() + print("res_all: ", res_all) + + s_id = res_all[0]['id'] + res_id = db.select_by_id(s_id) + print("res_id: ", res_id) + + res_uername = db.select_by_username(username) + print("res_username: ", res_uername) + + # base64还原 + b = base64.b64decode(res_uername[0]['vector']) + vc = np.frombuffer(b, dtype=np.float32) + print(vc) + + # 删除数据 + db.drop_by_username(username) + res_all = db.select_all() + print("删除后 res_all: ", res_all) + db.drop_all() + \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/tts.py b/demos/speech_web/speech_server/src/SpeechBase/tts.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf53a0253cf7745745d58e41f4f063aa7d7f2ff --- /dev/null +++ b/demos/speech_web/speech_server/src/SpeechBase/tts.py @@ -0,0 +1,121 @@ +# tts 推理引擎,支持流式与非流式 +# 精简化使用 +# 用 onnxruntime 进行推理 +# 1. 下载对应的模型 +# 2. 加载模型 +# 3. 端到端推理 +# 4. 
流式推理 + +import base64 + +import numpy as np +from paddlespeech.server.utils.onnx_infer import get_sess +from paddlespeech.t2s.frontend.zh_frontend import Frontend +from paddlespeech.server.utils.util import denorm, get_chunks +from paddlespeech.server.utils.audio_process import float2pcm +from paddlespeech.server.utils.config import get_config + +from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine + + +class TTS: + def __init__(self, config_path): + self.config = get_config(config_path)['tts_online-onnx'] + self.config['voc_block'] = 36 + self.engine = TTSEngine() + self.engine.init(self.config) + self.engine.warm_up() + + # 前端初始化 + self.frontend = Frontend( + phone_vocab_path=self.engine.executor.phones_dict, + tone_vocab_path=None) + + def depadding(self, data, chunk_num, chunk_id, block, pad, upsample): + """ + Streaming inference removes the result of pad inference + """ + front_pad = min(chunk_id * block, pad) + # first chunk + if chunk_id == 0: + data = data[:block * upsample] + # last chunk + elif chunk_id == chunk_num - 1: + data = data[front_pad * upsample:] + # middle chunk + else: + data = data[front_pad * upsample:(front_pad + block) * upsample] + + return data + + def offlineTTS(self, text): + get_tone_ids = False + merge_sentences = False + + input_ids = self.frontend.get_input_ids( + text, + merge_sentences=merge_sentences, + get_tone_ids=get_tone_ids) + phone_ids = input_ids["phone_ids"] + wav_list = [] + for i in range(len(phone_ids)): + orig_hs = self.engine.executor.am_encoder_infer_sess.run( + None, input_feed={'text': phone_ids[i].numpy()} + ) + hs = orig_hs[0] + am_decoder_output = self.engine.executor.am_decoder_sess.run( + None, input_feed={'xs': hs}) + am_postnet_output = self.engine.executor.am_postnet_sess.run( + None, + input_feed={ + 'xs': np.transpose(am_decoder_output[0], (0, 2, 1)) + }) + am_output_data = am_decoder_output + np.transpose( + am_postnet_output[0], (0, 2, 1)) + normalized_mel = am_output_data[0][0] + mel = denorm(normalized_mel, self.engine.executor.am_mu, self.engine.executor.am_std) + wav = self.engine.executor.voc_sess.run( + output_names=None, input_feed={'logmel': mel})[0] + wav_list.append(wav) + wavs = np.concatenate(wav_list) + return wavs + + def streamTTS(self, text): + for sub_wav_base64 in self.engine.run(sentence=text): + yield sub_wav_base64 + + def streamTTSBytes(self, text): + for wav in self.engine.executor.infer( + text=text, + lang=self.engine.config.lang, + am=self.engine.config.am, + spk_id=0): + wav = float2pcm(wav) # float32 to int16 + wav_bytes = wav.tobytes() # to bytes + yield wav_bytes + + + def after_process(self, wav): + # for tvm + wav = float2pcm(wav) # float32 to int16 + wav_bytes = wav.tobytes() # to bytes + wav_base64 = base64.b64encode(wav_bytes).decode('utf8') # to base64 + return wav_base64 + + def streamTTS_TVM(self, text): + # 用 TVM 优化 + pass + +if __name__ == '__main__': + text = "啊哈哈哈哈哈哈啊哈哈哈哈哈哈啊哈哈哈哈哈哈啊哈哈哈哈哈哈啊哈哈哈哈哈哈" + config_path="../../PaddleSpeech/demos/streaming_tts_server/conf/tts_online_application.yaml" + tts = TTS(config_path) + + for sub_wav in tts.streamTTS(text): + print("sub_wav_base64: ", len(sub_wav)) + + end_wav = tts.offlineTTS(text) + print(end_wav) + + + \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/vpr.py b/demos/speech_web/speech_server/src/SpeechBase/vpr.py new file mode 100644 index 0000000000000000000000000000000000000000..8b3863e8ba737e08775eda86da999b0bac371dad --- /dev/null +++ 
b/demos/speech_web/speech_server/src/SpeechBase/vpr.py
@@ -0,0 +1,152 @@
+# VPR demo: uses neither MySQL nor Milvus; intended for the docker demo only
+import logging
+import faiss
+import numpy as np
+from .sql_helper import DataBase
+from .vpr_encode import get_audio_embedding
+
+class VPR:
+    def __init__(self, db_path, dim, top_k) -> None:
+        # initialization
+        self.db_path = db_path
+        self.dim = dim
+        self.top_k = top_k
+        self.dtype = np.float32
+        self.vpr_idx = 0
+
+        # database setup
+        self.db = DataBase(db_path)
+
+        # faiss setup
+        index_ip = faiss.IndexFlatIP(dim)
+        self.index_ip = faiss.IndexIDMap(index_ip)
+        self.init()
+
+    def init(self):
+        # demo startup: register the vectors stored in the database into faiss
+        sql_dbs = self.db.select_all()
+        if sql_dbs:
+            for sql_db in sql_dbs:
+                idx = sql_db['id']
+                vc_bs64 = sql_db['vector']
+                vc = self.db.decode_vector(vc_bs64)
+                if len(vc.shape) == 1:
+                    vc = np.expand_dims(vc, axis=0)
+                # populate the index
+                self.index_ip.add_with_ids(vc, np.array((idx,)).astype('int64'))
+        logging.info("faiss index built")
+
+    def faiss_enroll(self, idx, vc):
+        self.index_ip.add_with_ids(vc, np.array((idx,)).astype('int64'))
+
+    def vpr_enroll(self, username, wav_path):
+        # enroll a voiceprint
+        emb = get_audio_embedding(wav_path)
+        if emb is not None:
+            emb = np.expand_dims(emb, axis=0)
+            emb_bs64 = self.db.encode_vector(emb)
+            last_idx, mess = self.db.insert_one(username, emb_bs64, wav_path)
+            if last_idx:
+                # register in faiss as well
+                self.faiss_enroll(last_idx, emb)
+        else:
+            last_idx = None
+        return last_idx
+
+    def vpr_recog(self, wav_path):
+        # recognize a voiceprint
+        emb_search = get_audio_embedding(wav_path)
+
+        if emb_search is not None:
+            emb_search = np.expand_dims(emb_search, axis=0)
+            D, I = self.index_ip.search(emb_search, self.top_k)
+            D = D.tolist()[0]
+            I = I.tolist()[0]
+            return [(round(D[i] * 100, 2 ), I[i]) for i in range(len(D)) if I[i] != -1]
+        else:
+            logging.error("recognition failed")
+            return None
+
+    def do_search_vpr(self, wav_path):
+        spk_ids, paths, scores = [], [], []
+        recog_result = self.vpr_recog(wav_path)
+        for score, idx in recog_result:
+            username = self.db.select_by_id(idx)[0]['username']
+            if username not in spk_ids:
+                spk_ids.append(username)
+                scores.append(score)
+                paths.append("")
+        return spk_ids, paths, scores
+
+    def vpr_del(self, username):
+        # delete a speaker's voiceprints by username:
+        # look up the matching ids, then remove the corresponding vectors
+        res = self.db.select_by_username(username)
+        for r in res:
+            idx = r['id']
+            self.index_ip.remove_ids(np.array((idx,)).astype('int64'))
+
+        self.db.drop_by_username(username)
+
+    def vpr_list(self):
+        # list all records
+        return self.db.select_all()
+
+    def do_list(self):
+        spk_ids, vpr_ids = [], []
+        for res in self.db.select_all():
+            spk_ids.append(res['username'])
+            vpr_ids.append(res['id'])
+        return spk_ids, vpr_ids
+
+    def do_get_wav(self, vpr_idx):
+        res = self.db.select_by_id(vpr_idx)
+        return res[0]['wavpath']
+
+
+    def vpr_data(self, idx):
+        # fetch the record with the given id
+        res = self.db.select_by_id(idx)
+        return res
+
+    def vpr_droptable(self):
+        # drop the table
+        self.db.drop_table()
+        # clear faiss
+        self.index_ip.reset()
+
+
+
+if __name__ == '__main__':
+
+    db_path = "../../source/db/vpr.sqlite"
+    dim = 192
+    top_k = 5
+    vpr = VPR(db_path, dim, top_k)
+
+    # prepare test data
+    username = "sss"
+    wav_path = r"../../source/demo/demo_16k.wav"
+
+    # enroll a voiceprint
+    vpr.vpr_enroll(username, wav_path)
+
+    # list the records
+    print(vpr.vpr_list())
+
+    # recognize the voiceprint
+    recolist = vpr.vpr_recog(wav_path)
+    print(recolist)
+
+    # fetch a record by id
+    idx = recolist[0][1]
+    print(vpr.vpr_data(idx))
+
+    # delete the voiceprint
+    vpr.vpr_del(username)
+    vpr.vpr_droptable()
+
+
+
+
\ No newline at end of file
diff --git 
a/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py b/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py new file mode 100644 index 0000000000000000000000000000000000000000..5a6421336eb000450b8f432e39885cd5145b1585 --- /dev/null +++ b/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py @@ -0,0 +1,26 @@ +from paddlespeech.cli import VectorExecutor +import numpy as np +import logging + +vector_executor = VectorExecutor() + +def get_audio_embedding(path): + """ + Use vpr_inference to generate embedding of audio + """ + try: + embedding = vector_executor( + audio_file=path, model='ecapatdnn_voxceleb12') + embedding = embedding / np.linalg.norm(embedding) + return embedding + except Exception as e: + logging.error(f"Error with embedding:{e}") + return None + +if __name__ == '__main__': + audio_path = r"../../source/demo/demo_16k.wav" + emb = get_audio_embedding(audio_path) + print(emb.shape) + print(emb.dtype) + print(type(emb)) + \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/WebsocketManeger.py b/demos/speech_web/speech_server/src/WebsocketManeger.py new file mode 100644 index 0000000000000000000000000000000000000000..5edde8430232b4fd6f74398b6039ac657c2ea2d2 --- /dev/null +++ b/demos/speech_web/speech_server/src/WebsocketManeger.py @@ -0,0 +1,31 @@ +from typing import List + +from fastapi import WebSocket + +class ConnectionManager: + def __init__(self): + # 存放激活的ws连接对象 + self.active_connections: List[WebSocket] = [] + + async def connect(self, ws: WebSocket): + # 等待连接 + await ws.accept() + # 存储ws连接对象 + self.active_connections.append(ws) + + def disconnect(self, ws: WebSocket): + # 关闭时 移除ws对象 + self.active_connections.remove(ws) + + @staticmethod + async def send_personal_message(message: str, ws: WebSocket): + # 发送个人消息 + await ws.send_text(message) + + async def broadcast(self, message: str): + # 广播消息 + for connection in self.active_connections: + await connection.send_text(message) + + +manager = ConnectionManager() \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/robot.py b/demos/speech_web/speech_server/src/robot.py new file mode 100644 index 0000000000000000000000000000000000000000..05ac867ecfff0539a8e4745dc6303e1244c2469d --- /dev/null +++ b/demos/speech_web/speech_server/src/robot.py @@ -0,0 +1,93 @@ +from paddlespeech.cli.asr.infer import ASRExecutor +import soundfile as sf +import os +import librosa + +from src.SpeechBase.asr import ASR +from src.SpeechBase.tts import TTS +from src.SpeechBase.nlp import NLP + + +class Robot: + def __init__(self, asr_config, tts_config,asr_init_path, + ie_model_path=None) -> None: + self.nlp = NLP(ie_model_path=ie_model_path) + self.asr = ASR(config_path=asr_config) + self.tts = TTS(config_path=tts_config) + self.tts_sample_rate = 24000 + self.asr_sample_rate = 16000 + + # 流式识别效果不如端到端的模型,这里流式模型与端到端模型分开 + self.asr_model = ASRExecutor() + self.asr_name = "conformer_wenetspeech" + self.warm_up_asrmodel(asr_init_path) + + + def warm_up_asrmodel(self, asr_init_path): + if not os.path.exists(asr_init_path): + path_dir = os.path.dirname(asr_init_path) + if not os.path.exists(path_dir): + os.makedirs(path_dir, exist_ok=True) + + # TTS生成,采样率24000 + text = "生成初始音频" + self.text2speech(text, asr_init_path) + + # asr model初始化 + self.asr_model(asr_init_path, model=self.asr_name,lang='zh', + sample_rate=16000) + + + def speech2text(self, audio_file): + self.asr_model.preprocess(self.asr_name, audio_file) + self.asr_model.infer(self.asr_name) + res = self.asr_model.postprocess() + 
return res + + def text2speech(self, text, outpath): + wav = self.tts.offlineTTS(text) + sf.write( + outpath, wav, samplerate=self.tts_sample_rate) + res = wav + return res + + def text2speechStream(self, text): + for sub_wav_base64 in self.tts.streamTTS(text=text): + yield sub_wav_base64 + + def text2speechStreamBytes(self, text): + for wav_bytes in self.tts.streamTTSBytes(text=text): + yield wav_bytes + + def chat(self, text): + result = self.nlp.chat(text) + return result + + def ie(self, text): + result = self.nlp.ie(text) + return result + +if __name__ == '__main__': + tts_config = "../PaddleSpeech/demos/streaming_tts_server/conf/tts_online_application.yaml" + asr_config = "../PaddleSpeech/demos/streaming_asr_server/conf/ws_conformer_application.yaml" + demo_wav = "../source/demo/demo_16k.wav" + ie_model_path = "../source/model" + tts_wav = "../source/demo/tts.wav" + text = "今天天气真不错" + ie_text = "今天晚上我从大牛坊出发去三里屯花了六十五块钱" + + + robot = Robot(asr_config, tts_config, asr_init_path=demo_wav) + res = robot.speech2text(demo_wav) + print(res) + + res = robot.chat(text) + print(res) + print("tts offline") + robot.text2speech(res, tts_wav) + + print("ie test") + res = robot.ie(ie_text) + print(res) + + \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/util.py b/demos/speech_web/speech_server/src/util.py new file mode 100644 index 0000000000000000000000000000000000000000..34005d9194c752bc0c3bf9455e834d2facfdd454 --- /dev/null +++ b/demos/speech_web/speech_server/src/util.py @@ -0,0 +1,18 @@ +import random + +def randName(n=5): + return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba',n)) + +def SuccessRequest(result=None, message="ok"): + return { + "code": 0, + "result":result, + "message": message + } + +def ErrorRequest(result=None, message="error"): + return { + "code": -1, + "result":result, + "message": message + } \ No newline at end of file diff --git a/demos/speech_web/web_client/.gitignore b/demos/speech_web/web_client/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e33435dcedff00bbbb3434090fdd6678f9e88c26 --- /dev/null +++ b/demos/speech_web/web_client/.gitignore @@ -0,0 +1,25 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? +.vscode/* diff --git a/demos/speech_web/web_client/index.html b/demos/speech_web/web_client/index.html new file mode 100644 index 0000000000000000000000000000000000000000..6b20e7b7bb6ef38312f3dd8da055c9f7720eb824 --- /dev/null +++ b/demos/speech_web/web_client/index.html @@ -0,0 +1,13 @@ + + + + + + + 飞桨PaddleSpeech + + +
+ + + diff --git a/demos/speech_web/web_client/package-lock.json b/demos/speech_web/web_client/package-lock.json new file mode 100644 index 0000000000000000000000000000000000000000..f1c779782307c44e2b33807545e82d0340ec222b --- /dev/null +++ b/demos/speech_web/web_client/package-lock.json @@ -0,0 +1,1869 @@ +{ + "name": "paddlespeechwebclient", + "version": "0.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "paddlespeechwebclient", + "version": "0.0.0", + "dependencies": { + "ant-design-vue": "^2.2.8", + "axios": "^0.26.1", + "element-plus": "^2.1.9", + "js-audio-recorder": "0.5.7", + "lamejs": "^1.2.1", + "less": "^4.1.2", + "vue": "^3.2.25" + }, + "devDependencies": { + "@vitejs/plugin-vue": "^2.3.0", + "vite": "^2.9.0" + } + }, + "node_modules/@ant-design/colors": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/@ant-design/colors/-/colors-6.0.0.tgz", + "integrity": "sha512-qAZRvPzfdWHtfameEGP2Qvuf838NhergR35o+EuVyB5XvSA98xod5r4utvi4TJ3ywmevm290g9nsCG5MryrdWQ==", + "dependencies": { + "@ctrl/tinycolor": "^3.4.0" + } + }, + "node_modules/@ant-design/icons-svg": { + "version": "4.2.1", + "resolved": "https://registry.npmmirror.com/@ant-design/icons-svg/-/icons-svg-4.2.1.tgz", + "integrity": "sha512-EB0iwlKDGpG93hW8f85CTJTs4SvMX7tt5ceupvhALp1IF44SeUFOMhKUOYqpsoYWQKAOuTRDMqn75rEaKDp0Xw==" + }, + "node_modules/@ant-design/icons-vue": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/@ant-design/icons-vue/-/icons-vue-6.1.0.tgz", + "integrity": "sha512-EX6bYm56V+ZrKN7+3MT/ubDkvJ5rK/O2t380WFRflDcVFgsvl3NLH7Wxeau6R8DbrO5jWR6DSTC3B6gYFp77AA==", + "dependencies": { + "@ant-design/colors": "^6.0.0", + "@ant-design/icons-svg": "^4.2.1" + }, + "peerDependencies": { + "vue": ">=3.0.3" + } + }, + "node_modules/@babel/parser": { + "version": "7.17.9", + "resolved": "https://registry.npmmirror.com/@babel/parser/-/parser-7.17.9.tgz", + "integrity": "sha512-vqUSBLP8dQHFPdPi9bc5GK9vRkYHJ49fsZdtoJ8EQ8ibpwk5rPKfvNIwChB0KVXcIjcepEBBd2VHC5r9Gy8ueg==", + "license": "MIT", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.17.9", + "resolved": "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.17.9.tgz", + "integrity": "sha512-lSiBBvodq29uShpWGNbgFdKYNiFDo5/HIYsaCEY9ff4sb10x9jizo2+pRrSyF4jKZCXqgzuqBOQKbUm90gQwJg==", + "dependencies": { + "regenerator-runtime": "^0.13.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@ctrl/tinycolor": { + "version": "3.4.1", + "resolved": "https://registry.npmmirror.com/@ctrl/tinycolor/-/tinycolor-3.4.1.tgz", + "integrity": "sha512-ej5oVy6lykXsvieQtqZxCOaLT+xD4+QNarq78cIYISHmZXshCvROLudpQN3lfL8G0NL7plMSSK+zlyvCaIJ4Iw==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/@element-plus/icons-vue": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/@element-plus/icons-vue/-/icons-vue-1.1.4.tgz", + "integrity": "sha512-Iz/nHqdp1sFPmdzRwHkEQQA3lKvoObk8azgABZ81QUOpW9s/lUyQVUSh0tNtEPZXQlKwlSh7SPgoVxzrE0uuVQ==", + "license": "MIT", + "peerDependencies": { + "vue": "^3.2.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "0.6.1", + "resolved": "https://registry.npmmirror.com/@floating-ui/core/-/core-0.6.1.tgz", + "integrity": "sha512-Y30eVMcZva8o84c0HcXAtDO4BEzPJMvF6+B7x7urL2xbAqVsGJhojOyHLaoQHQYjb6OkqRq5kO+zeySycQwKqg==", + "license": "MIT" + }, + "node_modules/@floating-ui/dom": { + "version": "0.4.4", + "resolved": 
"https://registry.npmmirror.com/@floating-ui/dom/-/dom-0.4.4.tgz", + "integrity": "sha512-0Ulu3B/dqQplUUSqnTx0foSrlYuMN+GTtlJWvNJwt6Fr7/PqmlR/Y08o6/+bxDWr6p3roBJRaQ51MDZsNmEhhw==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^0.6.1" + } + }, + "node_modules/@popperjs/core": { + "version": "2.11.5", + "resolved": "https://registry.npmmirror.com/@popperjs/core/-/core-2.11.5.tgz", + "integrity": "sha512-9X2obfABZuDVLCgPK9aX0a/x4jaOEweTTWE2+9sr0Qqqevj2Uv5XorvusThmc9XGYpS9yI+fhh8RTafBtGposw==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/popperjs" + } + }, + "node_modules/@simonwep/pickr": { + "version": "1.8.2", + "resolved": "https://registry.npmmirror.com/@simonwep/pickr/-/pickr-1.8.2.tgz", + "integrity": "sha512-/l5w8BIkrpP6n1xsetx9MWPWlU6OblN5YgZZphxan0Tq4BByTCETL6lyIeY8lagalS2Nbt4F2W034KHLIiunKA==", + "dependencies": { + "core-js": "^3.15.1", + "nanopop": "^2.1.0" + } + }, + "node_modules/@types/lodash": { + "version": "4.14.181", + "resolved": "https://registry.npmmirror.com/@types/lodash/-/lodash-4.14.181.tgz", + "integrity": "sha512-n3tyKthHJbkiWhDZs3DkhkCzt2MexYHXlX0td5iMplyfwketaOeKboEVBqzceH7juqvEg3q5oUoBFxSLu7zFag==", + "license": "MIT" + }, + "node_modules/@types/lodash-es": { + "version": "4.17.6", + "resolved": "https://registry.npmmirror.com/@types/lodash-es/-/lodash-es-4.17.6.tgz", + "integrity": "sha512-R+zTeVUKDdfoRxpAryaQNRKk3105Rrgx2CFRClIgRGaqDTdjsm8h6IYA8ir584W3ePzkZfst5xIgDwYrlh9HLg==", + "license": "MIT", + "dependencies": { + "@types/lodash": "*" + } + }, + "node_modules/@vitejs/plugin-vue": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/@vitejs/plugin-vue/-/plugin-vue-2.3.1.tgz", + "integrity": "sha512-YNzBt8+jt6bSwpt7LP890U1UcTOIZZxfpE5WOJ638PNxSEKOqAi0+FSKS0nVeukfdZ0Ai/H7AFd6k3hayfGZqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "vite": "^2.5.10", + "vue": "^3.2.25" + } + }, + "node_modules/@vue/compiler-core": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/compiler-core/-/compiler-core-3.2.32.tgz", + "integrity": "sha512-bRQ8Rkpm/aYFElDWtKkTPHeLnX5pEkNxhPUcqu5crEJIilZH0yeFu/qUAcV4VfSE2AudNPkQSOwMZofhnuutmA==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.16.4", + "@vue/shared": "3.2.32", + "estree-walker": "^2.0.2", + "source-map": "^0.6.1" + } + }, + "node_modules/@vue/compiler-dom": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/compiler-dom/-/compiler-dom-3.2.32.tgz", + "integrity": "sha512-maa3PNB/NxR17h2hDQfcmS02o1f9r9QIpN1y6fe8tWPrS1E4+q8MqrvDDQNhYVPd84rc3ybtyumrgm9D5Rf/kg==", + "license": "MIT", + "dependencies": { + "@vue/compiler-core": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "node_modules/@vue/compiler-sfc": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/compiler-sfc/-/compiler-sfc-3.2.32.tgz", + "integrity": "sha512-uO6+Gh3AVdWm72lRRCjMr8nMOEqc6ezT9lWs5dPzh1E9TNaJkMYPaRtdY9flUv/fyVQotkfjY/ponjfR+trPSg==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.16.4", + "@vue/compiler-core": "3.2.32", + "@vue/compiler-dom": "3.2.32", + "@vue/compiler-ssr": "3.2.32", + "@vue/reactivity-transform": "3.2.32", + "@vue/shared": "3.2.32", + "estree-walker": "^2.0.2", + "magic-string": "^0.25.7", + "postcss": "^8.1.10", + "source-map": "^0.6.1" + } + }, + "node_modules/@vue/compiler-ssr": { + "version": "3.2.32", + "resolved": 
"https://registry.npmmirror.com/@vue/compiler-ssr/-/compiler-ssr-3.2.32.tgz", + "integrity": "sha512-ZklVUF/SgTx6yrDUkaTaBL/JMVOtSocP+z5Xz/qIqqLdW/hWL90P+ob/jOQ0Xc/om57892Q7sRSrex0wujOL2Q==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "node_modules/@vue/reactivity": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/reactivity/-/reactivity-3.2.32.tgz", + "integrity": "sha512-4zaDumuyDqkuhbb63hRd+YHFGopW7srFIWesLUQ2su/rJfWrSq3YUvoKAJE8Eu1EhZ2Q4c1NuwnEreKj1FkDxA==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.2.32" + } + }, + "node_modules/@vue/reactivity-transform": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/reactivity-transform/-/reactivity-transform-3.2.32.tgz", + "integrity": "sha512-CW1W9zaJtE275tZSWIfQKiPG0iHpdtSlmTqYBu7Y62qvtMgKG5yOxtvBs4RlrZHlaqFSE26avLAgQiTp4YHozw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.16.4", + "@vue/compiler-core": "3.2.32", + "@vue/shared": "3.2.32", + "estree-walker": "^2.0.2", + "magic-string": "^0.25.7" + } + }, + "node_modules/@vue/runtime-core": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/runtime-core/-/runtime-core-3.2.32.tgz", + "integrity": "sha512-uKKzK6LaCnbCJ7rcHvsK0azHLGpqs+Vi9B28CV1mfWVq1F3Bj8Okk3cX+5DtD06aUh4V2bYhS2UjjWiUUKUF0w==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "node_modules/@vue/runtime-dom": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/runtime-dom/-/runtime-dom-3.2.32.tgz", + "integrity": "sha512-AmlIg+GPqjkNoADLjHojEX5RGcAg+TsgXOOcUrtDHwKvA8mO26EnLQLB8nylDjU6AMJh2CIYn8NEgyOV5ZIScQ==", + "license": "MIT", + "dependencies": { + "@vue/runtime-core": "3.2.32", + "@vue/shared": "3.2.32", + "csstype": "^2.6.8" + } + }, + "node_modules/@vue/server-renderer": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/server-renderer/-/server-renderer-3.2.32.tgz", + "integrity": "sha512-TYKpZZfRJpGTTiy/s6bVYwQJpAUx3G03z4G7/3O18M11oacrMTVHaHjiPuPqf3xQtY8R4LKmQ3EOT/DRCA/7Wg==", + "license": "MIT", + "dependencies": { + "@vue/compiler-ssr": "3.2.32", + "@vue/shared": "3.2.32" + }, + "peerDependencies": { + "vue": "3.2.32" + } + }, + "node_modules/@vue/shared": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/shared/-/shared-3.2.32.tgz", + "integrity": "sha512-bjcixPErUsAnTQRQX4Z5IQnICYjIfNCyCl8p29v1M6kfVzvwOICPw+dz48nNuWlTOOx2RHhzHdazJibE8GSnsw==", + "license": "MIT" + }, + "node_modules/@vueuse/core": { + "version": "8.2.5", + "resolved": "https://registry.npmmirror.com/@vueuse/core/-/core-8.2.5.tgz", + "integrity": "sha512-5prZAA1Ji2ltwNUnzreu6WIXYqHYP/9U2BiY5mD/650VYLpVcwVlYznJDFcLCmEWI3o3Vd34oS1FUf+6Mh68GQ==", + "license": "MIT", + "dependencies": { + "@vueuse/metadata": "8.2.5", + "@vueuse/shared": "8.2.5", + "vue-demi": "*" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "@vue/composition-api": "^1.1.0", + "vue": "^2.6.0 || ^3.2.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + }, + "vue": { + "optional": true + } + } + }, + "node_modules/@vueuse/metadata": { + "version": "8.2.5", + "resolved": "https://registry.npmmirror.com/@vueuse/metadata/-/metadata-8.2.5.tgz", + "integrity": "sha512-Lk9plJjh9cIdiRdcj16dau+2LANxIdFCiTgdfzwYXbflxq0QnMBeOD2qHgKDE7fuVrtPcVWj8VSuZEx1HRfNQA==", + "license": "MIT", + "funding": { + "url": 
"https://github.com/sponsors/antfu" + } + }, + "node_modules/@vueuse/shared": { + "version": "8.2.5", + "resolved": "https://registry.npmmirror.com/@vueuse/shared/-/shared-8.2.5.tgz", + "integrity": "sha512-lNWo+7sk6JCuOj4AiYM+6HZ6fq4xAuVq1sVckMQKgfCJZpZRe4i8es+ZULO5bYTKP+VrOCtqrLR2GzEfrbr3YQ==", + "license": "MIT", + "dependencies": { + "vue-demi": "*" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "@vue/composition-api": "^1.1.0", + "vue": "^2.6.0 || ^3.2.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + }, + "vue": { + "optional": true + } + } + }, + "node_modules/ant-design-vue": { + "version": "2.2.8", + "resolved": "https://registry.npmmirror.com/ant-design-vue/-/ant-design-vue-2.2.8.tgz", + "integrity": "sha512-3graq9/gCfJQs6hznrHV6sa9oDmk/D1H3Oo0vLdVpPS/I61fZPk8NEyNKCHpNA6fT2cx6xx9U3QS63uuyikg/Q==", + "dependencies": { + "@ant-design/icons-vue": "^6.0.0", + "@babel/runtime": "^7.10.5", + "@simonwep/pickr": "~1.8.0", + "array-tree-filter": "^2.1.0", + "async-validator": "^3.3.0", + "dom-align": "^1.12.1", + "dom-scroll-into-view": "^2.0.0", + "lodash": "^4.17.21", + "lodash-es": "^4.17.15", + "moment": "^2.27.0", + "omit.js": "^2.0.0", + "resize-observer-polyfill": "^1.5.1", + "scroll-into-view-if-needed": "^2.2.25", + "shallow-equal": "^1.0.0", + "vue-types": "^3.0.0", + "warning": "^4.0.0" + }, + "peerDependencies": { + "@vue/compiler-sfc": ">=3.1.0", + "vue": ">=3.1.0" + } + }, + "node_modules/ant-design-vue/node_modules/async-validator": { + "version": "3.5.2", + "resolved": "https://registry.npmmirror.com/async-validator/-/async-validator-3.5.2.tgz", + "integrity": "sha512-8eLCg00W9pIRZSB781UUX/H6Oskmm8xloZfr09lz5bikRpBVDlJ3hRVuxxP1SxcwsEYfJ4IU8Q19Y8/893r3rQ==" + }, + "node_modules/array-tree-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz", + "integrity": "sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw==" + }, + "node_modules/async-validator": { + "version": "4.0.7", + "resolved": "https://registry.npmmirror.com/async-validator/-/async-validator-4.0.7.tgz", + "integrity": "sha512-Pj2IR7u8hmUEDOwB++su6baaRi+QvsgajuFB9j95foM1N2gy5HM4z60hfusIO0fBPG5uLAEl6yCJr1jNSVugEQ==", + "license": "MIT" + }, + "node_modules/axios": { + "version": "0.26.1", + "resolved": "https://registry.npmmirror.com/axios/-/axios-0.26.1.tgz", + "integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.14.8" + } + }, + "node_modules/axios/node_modules/follow-redirects": { + "version": "1.14.9", + "resolved": "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.14.9.tgz", + "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/compute-scroll-into-view": { + "version": "1.0.17", + "resolved": "https://registry.npmmirror.com/compute-scroll-into-view/-/compute-scroll-into-view-1.0.17.tgz", + "integrity": "sha512-j4dx+Fb0URmzbwwMUrhqWM2BEWHdFGx+qZ9qqASHRPqvTYdqvWnHg0H1hIbcyLnvgnoNAVMlwkepyqM3DaIFUg==" + }, + "node_modules/copy-anything": { + "version": "2.0.6", + 
"resolved": "https://registry.npmmirror.com/copy-anything/-/copy-anything-2.0.6.tgz", + "integrity": "sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw==", + "dependencies": { + "is-what": "^3.14.1" + } + }, + "node_modules/core-js": { + "version": "3.22.5", + "resolved": "https://registry.npmmirror.com/core-js/-/core-js-3.22.5.tgz", + "integrity": "sha512-VP/xYuvJ0MJWRAobcmQ8F2H6Bsn+s7zqAAjFaHGBMc5AQm7zaelhD1LGduFn2EehEcQcU+br6t+fwbpQ5d1ZWA==", + "hasInstallScript": true + }, + "node_modules/csstype": { + "version": "2.6.20", + "resolved": "https://registry.npmmirror.com/csstype/-/csstype-2.6.20.tgz", + "integrity": "sha512-/WwNkdXfckNgw6S5R125rrW8ez139lBHWouiBvX8dfMFtcn6V81REDqnH7+CRpRipfYlyU1CmOnOxrmGcFOjeA==", + "license": "MIT" + }, + "node_modules/dayjs": { + "version": "1.11.0", + "resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.0.tgz", + "integrity": "sha512-JLC809s6Y948/FuCZPm5IX8rRhQwOiyMb2TfVVQEixG7P8Lm/gt5S7yoQZmC8x1UehI9Pb7sksEt4xx14m+7Ug==", + "license": "MIT" + }, + "node_modules/dom-align": { + "version": "1.12.3", + "resolved": "https://registry.npmmirror.com/dom-align/-/dom-align-1.12.3.tgz", + "integrity": "sha512-Gj9hZN3a07cbR6zviMUBOMPdWxYhbMI+x+WS0NAIu2zFZmbK8ys9R79g+iG9qLnlCwpFoaB+fKy8Pdv470GsPA==" + }, + "node_modules/dom-scroll-into-view": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/dom-scroll-into-view/-/dom-scroll-into-view-2.0.1.tgz", + "integrity": "sha512-bvVTQe1lfaUr1oFzZX80ce9KLDlZ3iU+XGNE/bz9HnGdklTieqsbmsLHe+rT2XWqopvL0PckkYqN7ksmm5pe3w==" + }, + "node_modules/element-plus": { + "version": "2.1.9", + "resolved": "https://registry.npmmirror.com/element-plus/-/element-plus-2.1.9.tgz", + "integrity": "sha512-6mWqS3YrmJPnouWP4otzL8+MehfOnDFqDbcIdnmC07p+Z0JkWe/CVKc4Wky8AYC8nyDMUQyiZYvooCbqGuM7pg==", + "license": "MIT", + "dependencies": { + "@ctrl/tinycolor": "^3.4.0", + "@element-plus/icons-vue": "^1.1.4", + "@floating-ui/dom": "^0.4.2", + "@popperjs/core": "^2.11.4", + "@types/lodash": "^4.14.181", + "@types/lodash-es": "^4.17.6", + "@vueuse/core": "^8.2.4", + "async-validator": "^4.0.7", + "dayjs": "^1.11.0", + "escape-html": "^1.0.3", + "lodash": "^4.17.21", + "lodash-es": "^4.17.21", + "lodash-unified": "^1.0.2", + "memoize-one": "^6.0.0", + "normalize-wheel-es": "^1.1.2" + }, + "peerDependencies": { + "vue": "^3.2.0" + } + }, + "node_modules/errno": { + "version": "0.1.8", + "resolved": "https://registry.npmmirror.com/errno/-/errno-0.1.8.tgz", + "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==", + "optional": true, + "dependencies": { + "prr": "~1.0.1" + }, + "bin": { + "errno": "cli.js" + } + }, + "node_modules/esbuild": { + "version": "0.14.36", + "resolved": "https://registry.npmmirror.com/esbuild/-/esbuild-0.14.36.tgz", + "integrity": "sha512-HhFHPiRXGYOCRlrhpiVDYKcFJRdO0sBElZ668M4lh2ER0YgnkLxECuFe7uWCf23FrcLc59Pqr7dHkTqmRPDHmw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "esbuild-android-64": "0.14.36", + "esbuild-android-arm64": "0.14.36", + "esbuild-darwin-64": "0.14.36", + "esbuild-darwin-arm64": "0.14.36", + "esbuild-freebsd-64": "0.14.36", + "esbuild-freebsd-arm64": "0.14.36", + "esbuild-linux-32": "0.14.36", + "esbuild-linux-64": "0.14.36", + "esbuild-linux-arm": "0.14.36", + "esbuild-linux-arm64": "0.14.36", + "esbuild-linux-mips64le": "0.14.36", + 
"esbuild-linux-ppc64le": "0.14.36", + "esbuild-linux-riscv64": "0.14.36", + "esbuild-linux-s390x": "0.14.36", + "esbuild-netbsd-64": "0.14.36", + "esbuild-openbsd-64": "0.14.36", + "esbuild-sunos-64": "0.14.36", + "esbuild-windows-32": "0.14.36", + "esbuild-windows-64": "0.14.36", + "esbuild-windows-arm64": "0.14.36" + } + }, + "node_modules/esbuild-darwin-64": { + "version": "0.14.36", + "resolved": "https://registry.npmmirror.com/esbuild-darwin-64/-/esbuild-darwin-64-0.14.36.tgz", + "integrity": "sha512-kkl6qmV0dTpyIMKagluzYqlc1vO0ecgpviK/7jwPbRDEv5fejRTaBBEE2KxEQbTHcLhiiDbhG7d5UybZWo/1zQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true, + "license": "MIT" + }, + "node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmmirror.com/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", + "optional": true + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "optional": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/image-size": { + "version": "0.5.5", + "resolved": "https://registry.npmmirror.com/image-size/-/image-size-0.5.5.tgz", + "integrity": "sha512-6TDAlDPZxUFCv+fuOkIoXT/V/f3Qbq8e37p+YOiYrUv3v9cc3/6x78VdfPgFVaB9dZYeLUfKgHRebpkm/oP2VQ==", + "optional": true, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-core-module": { + "version": "2.8.1", + "resolved": "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.8.1.tgz", + "integrity": 
"sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA==", + "dev": true, + "license": "MIT", + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-plain-object": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/is-plain-object/-/is-plain-object-3.0.1.tgz", + "integrity": "sha512-Xnpx182SBMrr/aBik8y+GuR4U1L9FqMSojwDQwPMmxyC6bvEqly9UBCxhauBF5vNh2gwWJNX6oDV7O+OM4z34g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-what": { + "version": "3.14.1", + "resolved": "https://registry.npmmirror.com/is-what/-/is-what-3.14.1.tgz", + "integrity": "sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==" + }, + "node_modules/js-audio-recorder": { + "version": "0.5.7", + "resolved": "https://registry.npmmirror.com/js-audio-recorder/-/js-audio-recorder-0.5.7.tgz", + "integrity": "sha512-DIlv30N86AYHr7zGHN0O7V/3Rd8Q6SIJ/MBzVJaT9STWTdhF4E/8fxCX6ZMgRSv8xmx6fEqcFFNPoofmxJD4+A==", + "license": "MIT" + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/lamejs": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/lamejs/-/lamejs-1.2.1.tgz", + "integrity": "sha512-s7bxvjvYthw6oPLCm5pFxvA84wUROODB8jEO2+CE1adhKgrIvVOlmMgY8zyugxGrvRaDHNJanOiS21/emty6dQ==", + "license": "LGPL-3.0", + "dependencies": { + "use-strict": "1.0.1" + } + }, + "node_modules/less": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/less/-/less-4.1.2.tgz", + "integrity": "sha512-EoQp/Et7OSOVu0aJknJOtlXZsnr8XE8KwuzTHOLeVSEx8pVWUICc8Q0VYRHgzyjX78nMEyC/oztWFbgyhtNfDA==", + "dependencies": { + "copy-anything": "^2.0.1", + "parse-node-version": "^1.0.1", + "tslib": "^2.3.0" + }, + "bin": { + "lessc": "bin/lessc" + }, + "engines": { + "node": ">=6" + }, + "optionalDependencies": { + "errno": "^0.1.1", + "graceful-fs": "^4.1.2", + "image-size": "~0.5.0", + "make-dir": "^2.1.0", + "mime": "^1.4.1", + "needle": "^2.5.2", + "source-map": "~0.6.0" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "license": "MIT" + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmmirror.com/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + }, + "node_modules/lodash-unified": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/lodash-unified/-/lodash-unified-1.0.2.tgz", + "integrity": "sha512-OGbEy+1P+UT26CYi4opY4gebD8cWRDxAT6MAObIVQMiqYdxZr1g3QHWCToVsm31x2NkLS4K3+MC2qInaRMa39g==", + "license": "MIT", + "peerDependencies": { + "@types/lodash-es": "*", + "lodash": "*", + "lodash-es": "*" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, 
+ "node_modules/magic-string": { + "version": "0.25.9", + "resolved": "https://registry.npmmirror.com/magic-string/-/magic-string-0.25.9.tgz", + "integrity": "sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==", + "license": "MIT", + "dependencies": { + "sourcemap-codec": "^1.4.8" + } + }, + "node_modules/make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "optional": true, + "dependencies": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/memoize-one": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/memoize-one/-/memoize-one-6.0.0.tgz", + "integrity": "sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==", + "license": "MIT" + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "optional": true, + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/moment": { + "version": "2.29.3", + "resolved": "https://registry.npmmirror.com/moment/-/moment-2.29.3.tgz", + "integrity": "sha512-c6YRvhEo//6T2Jz/vVtYzqBzwvPT95JBQ+smCytzf7c50oMZRsR/a4w88aD34I+/QVSfnoAnSBFPJHItlOMJVw==", + "engines": { + "node": "*" + } + }, + "node_modules/nanoid": { + "version": "3.3.2", + "resolved": "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.2.tgz", + "integrity": "sha512-CuHBogktKwpm5g2sRgv83jEy2ijFzBwMoYA60orPDR7ynsLijJDqgsi4RDGj3OJpy3Ieb+LYwiRmIOGyytgITA==", + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/nanopop": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/nanopop/-/nanopop-2.1.0.tgz", + "integrity": "sha512-jGTwpFRexSH+fxappnGQtN9dspgE2ipa1aOjtR24igG0pv6JCxImIAmrLRHX+zUF5+1wtsFVbKyfP51kIGAVNw==" + }, + "node_modules/needle": { + "version": "2.9.1", + "resolved": "https://registry.npmmirror.com/needle/-/needle-2.9.1.tgz", + "integrity": "sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ==", + "optional": true, + "dependencies": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + }, + "bin": { + "needle": "bin/needle" + }, + "engines": { + "node": ">= 4.4.x" + } + }, + "node_modules/needle/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmmirror.com/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "optional": true, + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/needle/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "optional": true + }, + "node_modules/normalize-wheel-es": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/normalize-wheel-es/-/normalize-wheel-es-1.1.2.tgz", + "integrity": "sha512-scX83plWJXYH1J4+BhAuIHadROzxX0UBF3+HuZNY2Ks8BciE7tSTQ+5JhTsvzjaO0/EJdm4JBGrfObKxFf3Png==", + "license": "BSD-3-Clause" + }, + 
"node_modules/omit.js": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/omit.js/-/omit.js-2.0.2.tgz", + "integrity": "sha512-hJmu9D+bNB40YpL9jYebQl4lsTW6yEHRTroJzNLqQJYHm7c+NQnJGfZmIWh8S3q3KoaxV1aLhV6B3+0N0/kyJg==" + }, + "node_modules/parse-node-version": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/parse-node-version/-/parse-node-version-1.0.1.tgz", + "integrity": "sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "license": "ISC" + }, + "node_modules/pify": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "optional": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/postcss": { + "version": "8.4.12", + "resolved": "https://registry.npmmirror.com/postcss/-/postcss-8.4.12.tgz", + "integrity": "sha512-lg6eITwYe9v6Hr5CncVbK70SoioNQIq81nsaG86ev5hAidQvmOeETBqs7jm43K2F5/Ley3ytDtriImV6TpNiSg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.1", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prr": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/prr/-/prr-1.0.1.tgz", + "integrity": "sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==", + "optional": true + }, + "node_modules/regenerator-runtime": { + "version": "0.13.9", + "resolved": "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", + "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" + }, + "node_modules/resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmmirror.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==" + }, + "node_modules/resolve": { + "version": "1.22.0", + "resolved": "https://registry.npmmirror.com/resolve/-/resolve-1.22.0.tgz", + "integrity": "sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.8.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/rollup": { + "version": "2.70.1", + "resolved": "https://registry.npmmirror.com/rollup/-/rollup-2.70.1.tgz", + "integrity": 
"sha512-CRYsI5EuzLbXdxC6RnYhOuRdtz4bhejPMSWjsFLfVM/7w/85n2szZv6yExqUXsBdz5KT8eoubeyDUDjhLHEslA==", + "dev": true, + "license": "MIT", + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=10.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "optional": true + }, + "node_modules/sax": { + "version": "1.2.4", + "resolved": "https://registry.npmmirror.com/sax/-/sax-1.2.4.tgz", + "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==", + "optional": true + }, + "node_modules/scroll-into-view-if-needed": { + "version": "2.2.29", + "resolved": "https://registry.npmmirror.com/scroll-into-view-if-needed/-/scroll-into-view-if-needed-2.2.29.tgz", + "integrity": "sha512-hxpAR6AN+Gh53AdAimHM6C8oTN1ppwVZITihix+WqalywBeFcQ6LdQP5ABNl26nX8GTEL7VT+b8lKpdqq65wXg==", + "dependencies": { + "compute-scroll-into-view": "^1.0.17" + } + }, + "node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmmirror.com/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "optional": true, + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/shallow-equal": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/shallow-equal/-/shallow-equal-1.2.1.tgz", + "integrity": "sha512-S4vJDjHHMBaiZuT9NPb616CSmLf618jawtv3sufLl6ivK8WocjAo58cXwbRV1cgqxH0Qbv+iUt6m05eqEa2IRA==" + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sourcemap-codec": { + "version": "1.4.8", + "resolved": "https://registry.npmmirror.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz", + "integrity": "sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==", + "license": "MIT" + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tslib": { + "version": "2.4.0", + "resolved": "https://registry.npmmirror.com/tslib/-/tslib-2.4.0.tgz", + "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" + }, + "node_modules/use-strict": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/use-strict/-/use-strict-1.0.1.tgz", + "integrity": 
"sha512-IeiWvvEXfW5ltKVMkxq6FvNf2LojMKvB2OCeja6+ct24S1XOmQw2dGr2JyndwACWAGJva9B7yPHwAmeA9QCqAQ==", + "license": "ISC" + }, + "node_modules/vite": { + "version": "2.9.1", + "resolved": "https://registry.npmmirror.com/vite/-/vite-2.9.1.tgz", + "integrity": "sha512-vSlsSdOYGcYEJfkQ/NeLXgnRv5zZfpAsdztkIrs7AZHV8RCMZQkwjo4DS5BnrYTqoWqLoUe1Cah4aVO4oNNqCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.14.27", + "postcss": "^8.4.12", + "resolve": "^1.22.0", + "rollup": "^2.59.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": ">=12.2.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "less": "*", + "sass": "*", + "stylus": "*" + }, + "peerDependenciesMeta": { + "less": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + } + } + }, + "node_modules/vue": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/vue/-/vue-3.2.32.tgz", + "integrity": "sha512-6L3jKZApF042OgbCkh+HcFeAkiYi3Lovi8wNhWqIK98Pi5efAMLZzRHgi91v+60oIRxdJsGS9sTMsb+yDpY8Eg==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.2.32", + "@vue/compiler-sfc": "3.2.32", + "@vue/runtime-dom": "3.2.32", + "@vue/server-renderer": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "node_modules/vue-demi": { + "version": "0.12.5", + "resolved": "https://registry.npmmirror.com/vue-demi/-/vue-demi-0.12.5.tgz", + "integrity": "sha512-BREuTgTYlUr0zw0EZn3hnhC3I6gPWv+Kwh4MCih6QcAeaTlaIX0DwOVN0wHej7hSvDPecz4jygy/idsgKfW58Q==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "vue-demi-fix": "bin/vue-demi-fix.js", + "vue-demi-switch": "bin/vue-demi-switch.js" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "@vue/composition-api": "^1.0.0-rc.1", + "vue": "^3.0.0-0 || ^2.6.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + } + } + }, + "node_modules/vue-types": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/vue-types/-/vue-types-3.0.2.tgz", + "integrity": "sha512-IwUC0Aq2zwaXqy74h4WCvFCUtoV0iSWr0snWnE9TnU18S66GAQyqQbRf2qfJtUuiFsBf6qp0MEwdonlwznlcrw==", + "dependencies": { + "is-plain-object": "3.0.1" + }, + "engines": { + "node": ">=10.15.0" + }, + "peerDependencies": { + "vue": "^3.0.0" + } + }, + "node_modules/warning": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "dependencies": { + "loose-envify": "^1.0.0" + } + } + }, + "dependencies": { + "@ant-design/colors": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/@ant-design/colors/-/colors-6.0.0.tgz", + "integrity": "sha512-qAZRvPzfdWHtfameEGP2Qvuf838NhergR35o+EuVyB5XvSA98xod5r4utvi4TJ3ywmevm290g9nsCG5MryrdWQ==", + "requires": { + "@ctrl/tinycolor": "^3.4.0" + } + }, + "@ant-design/icons-svg": { + "version": "4.2.1", + "resolved": "https://registry.npmmirror.com/@ant-design/icons-svg/-/icons-svg-4.2.1.tgz", + "integrity": "sha512-EB0iwlKDGpG93hW8f85CTJTs4SvMX7tt5ceupvhALp1IF44SeUFOMhKUOYqpsoYWQKAOuTRDMqn75rEaKDp0Xw==" + }, + "@ant-design/icons-vue": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/@ant-design/icons-vue/-/icons-vue-6.1.0.tgz", + "integrity": "sha512-EX6bYm56V+ZrKN7+3MT/ubDkvJ5rK/O2t380WFRflDcVFgsvl3NLH7Wxeau6R8DbrO5jWR6DSTC3B6gYFp77AA==", + "requires": { + 
"@ant-design/colors": "^6.0.0", + "@ant-design/icons-svg": "^4.2.1" + } + }, + "@babel/parser": { + "version": "7.17.9", + "resolved": "https://registry.npmmirror.com/@babel/parser/-/parser-7.17.9.tgz", + "integrity": "sha512-vqUSBLP8dQHFPdPi9bc5GK9vRkYHJ49fsZdtoJ8EQ8ibpwk5rPKfvNIwChB0KVXcIjcepEBBd2VHC5r9Gy8ueg==" + }, + "@babel/runtime": { + "version": "7.17.9", + "resolved": "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.17.9.tgz", + "integrity": "sha512-lSiBBvodq29uShpWGNbgFdKYNiFDo5/HIYsaCEY9ff4sb10x9jizo2+pRrSyF4jKZCXqgzuqBOQKbUm90gQwJg==", + "requires": { + "regenerator-runtime": "^0.13.4" + } + }, + "@ctrl/tinycolor": { + "version": "3.4.1", + "resolved": "https://registry.npmmirror.com/@ctrl/tinycolor/-/tinycolor-3.4.1.tgz", + "integrity": "sha512-ej5oVy6lykXsvieQtqZxCOaLT+xD4+QNarq78cIYISHmZXshCvROLudpQN3lfL8G0NL7plMSSK+zlyvCaIJ4Iw==" + }, + "@element-plus/icons-vue": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/@element-plus/icons-vue/-/icons-vue-1.1.4.tgz", + "integrity": "sha512-Iz/nHqdp1sFPmdzRwHkEQQA3lKvoObk8azgABZ81QUOpW9s/lUyQVUSh0tNtEPZXQlKwlSh7SPgoVxzrE0uuVQ==", + "requires": {} + }, + "@floating-ui/core": { + "version": "0.6.1", + "resolved": "https://registry.npmmirror.com/@floating-ui/core/-/core-0.6.1.tgz", + "integrity": "sha512-Y30eVMcZva8o84c0HcXAtDO4BEzPJMvF6+B7x7urL2xbAqVsGJhojOyHLaoQHQYjb6OkqRq5kO+zeySycQwKqg==" + }, + "@floating-ui/dom": { + "version": "0.4.4", + "resolved": "https://registry.npmmirror.com/@floating-ui/dom/-/dom-0.4.4.tgz", + "integrity": "sha512-0Ulu3B/dqQplUUSqnTx0foSrlYuMN+GTtlJWvNJwt6Fr7/PqmlR/Y08o6/+bxDWr6p3roBJRaQ51MDZsNmEhhw==", + "requires": { + "@floating-ui/core": "^0.6.1" + } + }, + "@popperjs/core": { + "version": "2.11.5", + "resolved": "https://registry.npmmirror.com/@popperjs/core/-/core-2.11.5.tgz", + "integrity": "sha512-9X2obfABZuDVLCgPK9aX0a/x4jaOEweTTWE2+9sr0Qqqevj2Uv5XorvusThmc9XGYpS9yI+fhh8RTafBtGposw==" + }, + "@simonwep/pickr": { + "version": "1.8.2", + "resolved": "https://registry.npmmirror.com/@simonwep/pickr/-/pickr-1.8.2.tgz", + "integrity": "sha512-/l5w8BIkrpP6n1xsetx9MWPWlU6OblN5YgZZphxan0Tq4BByTCETL6lyIeY8lagalS2Nbt4F2W034KHLIiunKA==", + "requires": { + "core-js": "^3.15.1", + "nanopop": "^2.1.0" + } + }, + "@types/lodash": { + "version": "4.14.181", + "resolved": "https://registry.npmmirror.com/@types/lodash/-/lodash-4.14.181.tgz", + "integrity": "sha512-n3tyKthHJbkiWhDZs3DkhkCzt2MexYHXlX0td5iMplyfwketaOeKboEVBqzceH7juqvEg3q5oUoBFxSLu7zFag==" + }, + "@types/lodash-es": { + "version": "4.17.6", + "resolved": "https://registry.npmmirror.com/@types/lodash-es/-/lodash-es-4.17.6.tgz", + "integrity": "sha512-R+zTeVUKDdfoRxpAryaQNRKk3105Rrgx2CFRClIgRGaqDTdjsm8h6IYA8ir584W3ePzkZfst5xIgDwYrlh9HLg==", + "requires": { + "@types/lodash": "*" + } + }, + "@vitejs/plugin-vue": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/@vitejs/plugin-vue/-/plugin-vue-2.3.1.tgz", + "integrity": "sha512-YNzBt8+jt6bSwpt7LP890U1UcTOIZZxfpE5WOJ638PNxSEKOqAi0+FSKS0nVeukfdZ0Ai/H7AFd6k3hayfGZqQ==", + "dev": true, + "requires": {} + }, + "@vue/compiler-core": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/compiler-core/-/compiler-core-3.2.32.tgz", + "integrity": "sha512-bRQ8Rkpm/aYFElDWtKkTPHeLnX5pEkNxhPUcqu5crEJIilZH0yeFu/qUAcV4VfSE2AudNPkQSOwMZofhnuutmA==", + "requires": { + "@babel/parser": "^7.16.4", + "@vue/shared": "3.2.32", + "estree-walker": "^2.0.2", + "source-map": "^0.6.1" + } + }, + "@vue/compiler-dom": { + "version": "3.2.32", + "resolved": 
"https://registry.npmmirror.com/@vue/compiler-dom/-/compiler-dom-3.2.32.tgz", + "integrity": "sha512-maa3PNB/NxR17h2hDQfcmS02o1f9r9QIpN1y6fe8tWPrS1E4+q8MqrvDDQNhYVPd84rc3ybtyumrgm9D5Rf/kg==", + "requires": { + "@vue/compiler-core": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "@vue/compiler-sfc": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/compiler-sfc/-/compiler-sfc-3.2.32.tgz", + "integrity": "sha512-uO6+Gh3AVdWm72lRRCjMr8nMOEqc6ezT9lWs5dPzh1E9TNaJkMYPaRtdY9flUv/fyVQotkfjY/ponjfR+trPSg==", + "requires": { + "@babel/parser": "^7.16.4", + "@vue/compiler-core": "3.2.32", + "@vue/compiler-dom": "3.2.32", + "@vue/compiler-ssr": "3.2.32", + "@vue/reactivity-transform": "3.2.32", + "@vue/shared": "3.2.32", + "estree-walker": "^2.0.2", + "magic-string": "^0.25.7", + "postcss": "^8.1.10", + "source-map": "^0.6.1" + } + }, + "@vue/compiler-ssr": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/compiler-ssr/-/compiler-ssr-3.2.32.tgz", + "integrity": "sha512-ZklVUF/SgTx6yrDUkaTaBL/JMVOtSocP+z5Xz/qIqqLdW/hWL90P+ob/jOQ0Xc/om57892Q7sRSrex0wujOL2Q==", + "requires": { + "@vue/compiler-dom": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "@vue/reactivity": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/reactivity/-/reactivity-3.2.32.tgz", + "integrity": "sha512-4zaDumuyDqkuhbb63hRd+YHFGopW7srFIWesLUQ2su/rJfWrSq3YUvoKAJE8Eu1EhZ2Q4c1NuwnEreKj1FkDxA==", + "requires": { + "@vue/shared": "3.2.32" + } + }, + "@vue/reactivity-transform": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/reactivity-transform/-/reactivity-transform-3.2.32.tgz", + "integrity": "sha512-CW1W9zaJtE275tZSWIfQKiPG0iHpdtSlmTqYBu7Y62qvtMgKG5yOxtvBs4RlrZHlaqFSE26avLAgQiTp4YHozw==", + "requires": { + "@babel/parser": "^7.16.4", + "@vue/compiler-core": "3.2.32", + "@vue/shared": "3.2.32", + "estree-walker": "^2.0.2", + "magic-string": "^0.25.7" + } + }, + "@vue/runtime-core": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/runtime-core/-/runtime-core-3.2.32.tgz", + "integrity": "sha512-uKKzK6LaCnbCJ7rcHvsK0azHLGpqs+Vi9B28CV1mfWVq1F3Bj8Okk3cX+5DtD06aUh4V2bYhS2UjjWiUUKUF0w==", + "requires": { + "@vue/reactivity": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "@vue/runtime-dom": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/runtime-dom/-/runtime-dom-3.2.32.tgz", + "integrity": "sha512-AmlIg+GPqjkNoADLjHojEX5RGcAg+TsgXOOcUrtDHwKvA8mO26EnLQLB8nylDjU6AMJh2CIYn8NEgyOV5ZIScQ==", + "requires": { + "@vue/runtime-core": "3.2.32", + "@vue/shared": "3.2.32", + "csstype": "^2.6.8" + } + }, + "@vue/server-renderer": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/server-renderer/-/server-renderer-3.2.32.tgz", + "integrity": "sha512-TYKpZZfRJpGTTiy/s6bVYwQJpAUx3G03z4G7/3O18M11oacrMTVHaHjiPuPqf3xQtY8R4LKmQ3EOT/DRCA/7Wg==", + "requires": { + "@vue/compiler-ssr": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "@vue/shared": { + "version": "3.2.32", + "resolved": "https://registry.npmmirror.com/@vue/shared/-/shared-3.2.32.tgz", + "integrity": "sha512-bjcixPErUsAnTQRQX4Z5IQnICYjIfNCyCl8p29v1M6kfVzvwOICPw+dz48nNuWlTOOx2RHhzHdazJibE8GSnsw==" + }, + "@vueuse/core": { + "version": "8.2.5", + "resolved": "https://registry.npmmirror.com/@vueuse/core/-/core-8.2.5.tgz", + "integrity": "sha512-5prZAA1Ji2ltwNUnzreu6WIXYqHYP/9U2BiY5mD/650VYLpVcwVlYznJDFcLCmEWI3o3Vd34oS1FUf+6Mh68GQ==", + "requires": { + "@vueuse/metadata": "8.2.5", + "@vueuse/shared": "8.2.5", + "vue-demi": 
"*" + } + }, + "@vueuse/metadata": { + "version": "8.2.5", + "resolved": "https://registry.npmmirror.com/@vueuse/metadata/-/metadata-8.2.5.tgz", + "integrity": "sha512-Lk9plJjh9cIdiRdcj16dau+2LANxIdFCiTgdfzwYXbflxq0QnMBeOD2qHgKDE7fuVrtPcVWj8VSuZEx1HRfNQA==" + }, + "@vueuse/shared": { + "version": "8.2.5", + "resolved": "https://registry.npmmirror.com/@vueuse/shared/-/shared-8.2.5.tgz", + "integrity": "sha512-lNWo+7sk6JCuOj4AiYM+6HZ6fq4xAuVq1sVckMQKgfCJZpZRe4i8es+ZULO5bYTKP+VrOCtqrLR2GzEfrbr3YQ==", + "requires": { + "vue-demi": "*" + } + }, + "ant-design-vue": { + "version": "2.2.8", + "resolved": "https://registry.npmmirror.com/ant-design-vue/-/ant-design-vue-2.2.8.tgz", + "integrity": "sha512-3graq9/gCfJQs6hznrHV6sa9oDmk/D1H3Oo0vLdVpPS/I61fZPk8NEyNKCHpNA6fT2cx6xx9U3QS63uuyikg/Q==", + "requires": { + "@ant-design/icons-vue": "^6.0.0", + "@babel/runtime": "^7.10.5", + "@simonwep/pickr": "~1.8.0", + "array-tree-filter": "^2.1.0", + "async-validator": "^3.3.0", + "dom-align": "^1.12.1", + "dom-scroll-into-view": "^2.0.0", + "lodash": "^4.17.21", + "lodash-es": "^4.17.15", + "moment": "^2.27.0", + "omit.js": "^2.0.0", + "resize-observer-polyfill": "^1.5.1", + "scroll-into-view-if-needed": "^2.2.25", + "shallow-equal": "^1.0.0", + "vue-types": "^3.0.0", + "warning": "^4.0.0" + }, + "dependencies": { + "async-validator": { + "version": "3.5.2", + "resolved": "https://registry.npmmirror.com/async-validator/-/async-validator-3.5.2.tgz", + "integrity": "sha512-8eLCg00W9pIRZSB781UUX/H6Oskmm8xloZfr09lz5bikRpBVDlJ3hRVuxxP1SxcwsEYfJ4IU8Q19Y8/893r3rQ==" + } + } + }, + "array-tree-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz", + "integrity": "sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw==" + }, + "async-validator": { + "version": "4.0.7", + "resolved": "https://registry.npmmirror.com/async-validator/-/async-validator-4.0.7.tgz", + "integrity": "sha512-Pj2IR7u8hmUEDOwB++su6baaRi+QvsgajuFB9j95foM1N2gy5HM4z60hfusIO0fBPG5uLAEl6yCJr1jNSVugEQ==" + }, + "axios": { + "version": "0.26.1", + "resolved": "https://registry.npmmirror.com/axios/-/axios-0.26.1.tgz", + "integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==", + "requires": { + "follow-redirects": "^1.14.8" + }, + "dependencies": { + "follow-redirects": { + "version": "1.14.9", + "resolved": "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.14.9.tgz", + "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==" + } + } + }, + "compute-scroll-into-view": { + "version": "1.0.17", + "resolved": "https://registry.npmmirror.com/compute-scroll-into-view/-/compute-scroll-into-view-1.0.17.tgz", + "integrity": "sha512-j4dx+Fb0URmzbwwMUrhqWM2BEWHdFGx+qZ9qqASHRPqvTYdqvWnHg0H1hIbcyLnvgnoNAVMlwkepyqM3DaIFUg==" + }, + "copy-anything": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/copy-anything/-/copy-anything-2.0.6.tgz", + "integrity": "sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw==", + "requires": { + "is-what": "^3.14.1" + } + }, + "core-js": { + "version": "3.22.5", + "resolved": "https://registry.npmmirror.com/core-js/-/core-js-3.22.5.tgz", + "integrity": "sha512-VP/xYuvJ0MJWRAobcmQ8F2H6Bsn+s7zqAAjFaHGBMc5AQm7zaelhD1LGduFn2EehEcQcU+br6t+fwbpQ5d1ZWA==" + }, + "csstype": { + "version": "2.6.20", + "resolved": 
"https://registry.npmmirror.com/csstype/-/csstype-2.6.20.tgz", + "integrity": "sha512-/WwNkdXfckNgw6S5R125rrW8ez139lBHWouiBvX8dfMFtcn6V81REDqnH7+CRpRipfYlyU1CmOnOxrmGcFOjeA==" + }, + "dayjs": { + "version": "1.11.0", + "resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.0.tgz", + "integrity": "sha512-JLC809s6Y948/FuCZPm5IX8rRhQwOiyMb2TfVVQEixG7P8Lm/gt5S7yoQZmC8x1UehI9Pb7sksEt4xx14m+7Ug==" + }, + "dom-align": { + "version": "1.12.3", + "resolved": "https://registry.npmmirror.com/dom-align/-/dom-align-1.12.3.tgz", + "integrity": "sha512-Gj9hZN3a07cbR6zviMUBOMPdWxYhbMI+x+WS0NAIu2zFZmbK8ys9R79g+iG9qLnlCwpFoaB+fKy8Pdv470GsPA==" + }, + "dom-scroll-into-view": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/dom-scroll-into-view/-/dom-scroll-into-view-2.0.1.tgz", + "integrity": "sha512-bvVTQe1lfaUr1oFzZX80ce9KLDlZ3iU+XGNE/bz9HnGdklTieqsbmsLHe+rT2XWqopvL0PckkYqN7ksmm5pe3w==" + }, + "element-plus": { + "version": "2.1.9", + "resolved": "https://registry.npmmirror.com/element-plus/-/element-plus-2.1.9.tgz", + "integrity": "sha512-6mWqS3YrmJPnouWP4otzL8+MehfOnDFqDbcIdnmC07p+Z0JkWe/CVKc4Wky8AYC8nyDMUQyiZYvooCbqGuM7pg==", + "requires": { + "@ctrl/tinycolor": "^3.4.0", + "@element-plus/icons-vue": "^1.1.4", + "@floating-ui/dom": "^0.4.2", + "@popperjs/core": "^2.11.4", + "@types/lodash": "^4.14.181", + "@types/lodash-es": "^4.17.6", + "@vueuse/core": "^8.2.4", + "async-validator": "^4.0.7", + "dayjs": "^1.11.0", + "escape-html": "^1.0.3", + "lodash": "^4.17.21", + "lodash-es": "^4.17.21", + "lodash-unified": "^1.0.2", + "memoize-one": "^6.0.0", + "normalize-wheel-es": "^1.1.2" + } + }, + "errno": { + "version": "0.1.8", + "resolved": "https://registry.npmmirror.com/errno/-/errno-0.1.8.tgz", + "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==", + "optional": true, + "requires": { + "prr": "~1.0.1" + } + }, + "esbuild": { + "version": "0.14.36", + "resolved": "https://registry.npmmirror.com/esbuild/-/esbuild-0.14.36.tgz", + "integrity": "sha512-HhFHPiRXGYOCRlrhpiVDYKcFJRdO0sBElZ668M4lh2ER0YgnkLxECuFe7uWCf23FrcLc59Pqr7dHkTqmRPDHmw==", + "dev": true, + "requires": { + "esbuild-android-64": "0.14.36", + "esbuild-android-arm64": "0.14.36", + "esbuild-darwin-64": "0.14.36", + "esbuild-darwin-arm64": "0.14.36", + "esbuild-freebsd-64": "0.14.36", + "esbuild-freebsd-arm64": "0.14.36", + "esbuild-linux-32": "0.14.36", + "esbuild-linux-64": "0.14.36", + "esbuild-linux-arm": "0.14.36", + "esbuild-linux-arm64": "0.14.36", + "esbuild-linux-mips64le": "0.14.36", + "esbuild-linux-ppc64le": "0.14.36", + "esbuild-linux-riscv64": "0.14.36", + "esbuild-linux-s390x": "0.14.36", + "esbuild-netbsd-64": "0.14.36", + "esbuild-openbsd-64": "0.14.36", + "esbuild-sunos-64": "0.14.36", + "esbuild-windows-32": "0.14.36", + "esbuild-windows-64": "0.14.36", + "esbuild-windows-arm64": "0.14.36" + } + }, + "esbuild-darwin-64": { + "version": "0.14.36", + "resolved": "https://registry.npmmirror.com/esbuild-darwin-64/-/esbuild-darwin-64-0.14.36.tgz", + "integrity": "sha512-kkl6qmV0dTpyIMKagluzYqlc1vO0ecgpviK/7jwPbRDEv5fejRTaBBEE2KxEQbTHcLhiiDbhG7d5UybZWo/1zQ==", + "dev": true, + "optional": true + }, + "escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "estree-walker": { + "version": "2.0.2", + "resolved": 
"https://registry.npmmirror.com/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" + }, + "fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "optional": true + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmmirror.com/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", + "optional": true + }, + "has": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1" + } + }, + "iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "optional": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "image-size": { + "version": "0.5.5", + "resolved": "https://registry.npmmirror.com/image-size/-/image-size-0.5.5.tgz", + "integrity": "sha512-6TDAlDPZxUFCv+fuOkIoXT/V/f3Qbq8e37p+YOiYrUv3v9cc3/6x78VdfPgFVaB9dZYeLUfKgHRebpkm/oP2VQ==", + "optional": true + }, + "is-core-module": { + "version": "2.8.1", + "resolved": "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.8.1.tgz", + "integrity": "sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA==", + "dev": true, + "requires": { + "has": "^1.0.3" + } + }, + "is-plain-object": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/is-plain-object/-/is-plain-object-3.0.1.tgz", + "integrity": "sha512-Xnpx182SBMrr/aBik8y+GuR4U1L9FqMSojwDQwPMmxyC6bvEqly9UBCxhauBF5vNh2gwWJNX6oDV7O+OM4z34g==" + }, + "is-what": { + "version": "3.14.1", + "resolved": "https://registry.npmmirror.com/is-what/-/is-what-3.14.1.tgz", + "integrity": "sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==" + }, + "js-audio-recorder": { + "version": "0.5.7", + "resolved": "https://registry.npmmirror.com/js-audio-recorder/-/js-audio-recorder-0.5.7.tgz", + "integrity": "sha512-DIlv30N86AYHr7zGHN0O7V/3Rd8Q6SIJ/MBzVJaT9STWTdhF4E/8fxCX6ZMgRSv8xmx6fEqcFFNPoofmxJD4+A==" + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "lamejs": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/lamejs/-/lamejs-1.2.1.tgz", + "integrity": "sha512-s7bxvjvYthw6oPLCm5pFxvA84wUROODB8jEO2+CE1adhKgrIvVOlmMgY8zyugxGrvRaDHNJanOiS21/emty6dQ==", + "requires": { + "use-strict": "1.0.1" + } + }, + "less": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/less/-/less-4.1.2.tgz", + "integrity": 
"sha512-EoQp/Et7OSOVu0aJknJOtlXZsnr8XE8KwuzTHOLeVSEx8pVWUICc8Q0VYRHgzyjX78nMEyC/oztWFbgyhtNfDA==", + "requires": { + "copy-anything": "^2.0.1", + "errno": "^0.1.1", + "graceful-fs": "^4.1.2", + "image-size": "~0.5.0", + "make-dir": "^2.1.0", + "mime": "^1.4.1", + "needle": "^2.5.2", + "parse-node-version": "^1.0.1", + "source-map": "~0.6.0", + "tslib": "^2.3.0" + } + }, + "lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmmirror.com/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" + }, + "lodash-unified": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/lodash-unified/-/lodash-unified-1.0.2.tgz", + "integrity": "sha512-OGbEy+1P+UT26CYi4opY4gebD8cWRDxAT6MAObIVQMiqYdxZr1g3QHWCToVsm31x2NkLS4K3+MC2qInaRMa39g==", + "requires": {} + }, + "loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, + "magic-string": { + "version": "0.25.9", + "resolved": "https://registry.npmmirror.com/magic-string/-/magic-string-0.25.9.tgz", + "integrity": "sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==", + "requires": { + "sourcemap-codec": "^1.4.8" + } + }, + "make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "optional": true, + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + } + }, + "memoize-one": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/memoize-one/-/memoize-one-6.0.0.tgz", + "integrity": "sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==" + }, + "mime": { + "version": "1.6.0", + "resolved": "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "optional": true + }, + "moment": { + "version": "2.29.3", + "resolved": "https://registry.npmmirror.com/moment/-/moment-2.29.3.tgz", + "integrity": "sha512-c6YRvhEo//6T2Jz/vVtYzqBzwvPT95JBQ+smCytzf7c50oMZRsR/a4w88aD34I+/QVSfnoAnSBFPJHItlOMJVw==" + }, + "nanoid": { + "version": "3.3.2", + "resolved": "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.2.tgz", + "integrity": "sha512-CuHBogktKwpm5g2sRgv83jEy2ijFzBwMoYA60orPDR7ynsLijJDqgsi4RDGj3OJpy3Ieb+LYwiRmIOGyytgITA==" + }, + "nanopop": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/nanopop/-/nanopop-2.1.0.tgz", + "integrity": "sha512-jGTwpFRexSH+fxappnGQtN9dspgE2ipa1aOjtR24igG0pv6JCxImIAmrLRHX+zUF5+1wtsFVbKyfP51kIGAVNw==" + }, + "needle": { + "version": "2.9.1", + "resolved": "https://registry.npmmirror.com/needle/-/needle-2.9.1.tgz", + "integrity": "sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ==", + "optional": true, + "requires": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + }, + "dependencies": 
{ + "debug": { + "version": "3.2.7", + "resolved": "https://registry.npmmirror.com/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "optional": true, + "requires": { + "ms": "^2.1.1" + } + }, + "ms": { + "version": "2.1.3", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "optional": true + } + } + }, + "normalize-wheel-es": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/normalize-wheel-es/-/normalize-wheel-es-1.1.2.tgz", + "integrity": "sha512-scX83plWJXYH1J4+BhAuIHadROzxX0UBF3+HuZNY2Ks8BciE7tSTQ+5JhTsvzjaO0/EJdm4JBGrfObKxFf3Png==" + }, + "omit.js": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/omit.js/-/omit.js-2.0.2.tgz", + "integrity": "sha512-hJmu9D+bNB40YpL9jYebQl4lsTW6yEHRTroJzNLqQJYHm7c+NQnJGfZmIWh8S3q3KoaxV1aLhV6B3+0N0/kyJg==" + }, + "parse-node-version": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/parse-node-version/-/parse-node-version-1.0.1.tgz", + "integrity": "sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA==" + }, + "path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "optional": true + }, + "postcss": { + "version": "8.4.12", + "resolved": "https://registry.npmmirror.com/postcss/-/postcss-8.4.12.tgz", + "integrity": "sha512-lg6eITwYe9v6Hr5CncVbK70SoioNQIq81nsaG86ev5hAidQvmOeETBqs7jm43K2F5/Ley3ytDtriImV6TpNiSg==", + "requires": { + "nanoid": "^3.3.1", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + } + }, + "prr": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/prr/-/prr-1.0.1.tgz", + "integrity": "sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==", + "optional": true + }, + "regenerator-runtime": { + "version": "0.13.9", + "resolved": "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", + "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" + }, + "resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmmirror.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==" + }, + "resolve": { + "version": "1.22.0", + "resolved": "https://registry.npmmirror.com/resolve/-/resolve-1.22.0.tgz", + "integrity": "sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw==", + "dev": true, + "requires": { + "is-core-module": "^2.8.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + } + }, + "rollup": { + "version": "2.70.1", + 
"resolved": "https://registry.npmmirror.com/rollup/-/rollup-2.70.1.tgz", + "integrity": "sha512-CRYsI5EuzLbXdxC6RnYhOuRdtz4bhejPMSWjsFLfVM/7w/85n2szZv6yExqUXsBdz5KT8eoubeyDUDjhLHEslA==", + "dev": true, + "requires": { + "fsevents": "~2.3.2" + } + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "optional": true + }, + "sax": { + "version": "1.2.4", + "resolved": "https://registry.npmmirror.com/sax/-/sax-1.2.4.tgz", + "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==", + "optional": true + }, + "scroll-into-view-if-needed": { + "version": "2.2.29", + "resolved": "https://registry.npmmirror.com/scroll-into-view-if-needed/-/scroll-into-view-if-needed-2.2.29.tgz", + "integrity": "sha512-hxpAR6AN+Gh53AdAimHM6C8oTN1ppwVZITihix+WqalywBeFcQ6LdQP5ABNl26nX8GTEL7VT+b8lKpdqq65wXg==", + "requires": { + "compute-scroll-into-view": "^1.0.17" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmmirror.com/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "optional": true + }, + "shallow-equal": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/shallow-equal/-/shallow-equal-1.2.1.tgz", + "integrity": "sha512-S4vJDjHHMBaiZuT9NPb616CSmLf618jawtv3sufLl6ivK8WocjAo58cXwbRV1cgqxH0Qbv+iUt6m05eqEa2IRA==" + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + }, + "source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==" + }, + "sourcemap-codec": { + "version": "1.4.8", + "resolved": "https://registry.npmmirror.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz", + "integrity": "sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==" + }, + "supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true + }, + "tslib": { + "version": "2.4.0", + "resolved": "https://registry.npmmirror.com/tslib/-/tslib-2.4.0.tgz", + "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" + }, + "use-strict": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/use-strict/-/use-strict-1.0.1.tgz", + "integrity": "sha512-IeiWvvEXfW5ltKVMkxq6FvNf2LojMKvB2OCeja6+ct24S1XOmQw2dGr2JyndwACWAGJva9B7yPHwAmeA9QCqAQ==" + }, + "vite": { + "version": "2.9.1", + "resolved": "https://registry.npmmirror.com/vite/-/vite-2.9.1.tgz", + "integrity": "sha512-vSlsSdOYGcYEJfkQ/NeLXgnRv5zZfpAsdztkIrs7AZHV8RCMZQkwjo4DS5BnrYTqoWqLoUe1Cah4aVO4oNNqCQ==", + "dev": true, + "requires": { + "esbuild": "^0.14.27", + "fsevents": "~2.3.2", + "postcss": "^8.4.12", + "resolve": "^1.22.0", + "rollup": "^2.59.0" + } + }, + "vue": { + "version": "3.2.32", + "resolved": 
"https://registry.npmmirror.com/vue/-/vue-3.2.32.tgz", + "integrity": "sha512-6L3jKZApF042OgbCkh+HcFeAkiYi3Lovi8wNhWqIK98Pi5efAMLZzRHgi91v+60oIRxdJsGS9sTMsb+yDpY8Eg==", + "requires": { + "@vue/compiler-dom": "3.2.32", + "@vue/compiler-sfc": "3.2.32", + "@vue/runtime-dom": "3.2.32", + "@vue/server-renderer": "3.2.32", + "@vue/shared": "3.2.32" + } + }, + "vue-demi": { + "version": "0.12.5", + "resolved": "https://registry.npmmirror.com/vue-demi/-/vue-demi-0.12.5.tgz", + "integrity": "sha512-BREuTgTYlUr0zw0EZn3hnhC3I6gPWv+Kwh4MCih6QcAeaTlaIX0DwOVN0wHej7hSvDPecz4jygy/idsgKfW58Q==", + "requires": {} + }, + "vue-types": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/vue-types/-/vue-types-3.0.2.tgz", + "integrity": "sha512-IwUC0Aq2zwaXqy74h4WCvFCUtoV0iSWr0snWnE9TnU18S66GAQyqQbRf2qfJtUuiFsBf6qp0MEwdonlwznlcrw==", + "requires": { + "is-plain-object": "3.0.1" + } + }, + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } +} diff --git a/demos/speech_web/web_client/package.json b/demos/speech_web/web_client/package.json new file mode 100644 index 0000000000000000000000000000000000000000..7f28d4c97cc942b4f089240b0e868a74f7815b91 --- /dev/null +++ b/demos/speech_web/web_client/package.json @@ -0,0 +1,23 @@ +{ + "name": "paddlespeechwebclient", + "private": true, + "version": "0.0.0", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "ant-design-vue": "^2.2.8", + "axios": "^0.26.1", + "element-plus": "^2.1.9", + "js-audio-recorder": "0.5.7", + "lamejs": "^1.2.1", + "less": "^4.1.2", + "vue": "^3.2.25" + }, + "devDependencies": { + "@vitejs/plugin-vue": "^2.3.0", + "vite": "^2.9.0" + } +} diff --git a/demos/speech_web/web_client/public/favicon.ico b/demos/speech_web/web_client/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..342038720d7c5a8fbbef1110d098e50f7a0e6274 Binary files /dev/null and b/demos/speech_web/web_client/public/favicon.ico differ diff --git a/demos/speech_web/web_client/src/App.vue b/demos/speech_web/web_client/src/App.vue new file mode 100644 index 0000000000000000000000000000000000000000..a70dbf9c4f68e54b98aa2ca040d5f0e0bdde7014 --- /dev/null +++ b/demos/speech_web/web_client/src/App.vue @@ -0,0 +1,19 @@ + + + + + diff --git a/demos/speech_web/web_client/src/api/API.js b/demos/speech_web/web_client/src/api/API.js new file mode 100644 index 0000000000000000000000000000000000000000..0feaa63f180932aa3ca5c9a5752919fc3f472eab --- /dev/null +++ b/demos/speech_web/web_client/src/api/API.js @@ -0,0 +1,29 @@ +export const apiURL = { + ASR_OFFLINE : '/api/asr/offline', // 获取离线语音识别结果 + ASR_COLLECT_ENV : '/api/asr/collectEnv', // 采集环境噪音 + ASR_STOP_RECORD : '/api/asr/stopRecord', // 后端暂停录音 + ASR_RESUME_RECORD : '/api/asr/resumeRecord',// 后端恢复录音 + + NLP_CHAT : '/api/nlp/chat', // NLP闲聊接口 + NLP_IE : '/api/nlp/ie', // 信息抽取接口 + + TTS_OFFLINE : '/api/tts/offline', // 获取TTS音频 + + VPR_RECOG : '/api/vpr/recog', // 声纹识别接口,返回声纹对比相似度 + VPR_ENROLL : '/api/vpr/enroll', // 声纹识别注册接口 + VPR_LIST : '/api/vpr/list', // 获取声纹注册的数据列表 + VPR_DEL : '/api/vpr/del', // 删除用户声纹 + VPR_DATA : '/api/vpr/database64?vprId=', // 获取声纹注册数据 bs64格式 + + // websocket + CHAT_SOCKET_RECORD: 'ws://localhost:8010/ws/asr/offlineStream', // ChatBot websocket 接口 + ASR_SOCKET_RECORD: 
'ws://localhost:8010/ws/asr/onlineStream', // streaming ASR endpoint + TTS_SOCKET_RECORD: 'ws://localhost:8010/ws/tts/online', // streaming TTS endpoint +} + + + + + + + diff --git a/demos/speech_web/web_client/src/api/ApiASR.js b/demos/speech_web/web_client/src/api/ApiASR.js new file mode 100644 index 0000000000000000000000000000000000000000..342c56164f3b6f3bd8acbf8160e1f6dac4340a80 --- /dev/null +++ b/demos/speech_web/web_client/src/api/ApiASR.js @@ -0,0 +1,30 @@ +import axios from 'axios' +import {apiURL} from "./API.js" + +// upload an audio file and get the recognition result +export async function asrOffline(params){ + const result = await axios.post( + apiURL.ASR_OFFLINE, params + ) + return result +} + +// upload an ambient-noise sample +export async function asrCollectEnv(params){ + const result = await axios.post( + apiURL.ASR_COLLECT_ENV, params + ) + return result +} + +// pause recording +export async function asrStopRecord(){ + const result = await axios.get(apiURL.ASR_STOP_RECORD); + return result +} + +// resume recording +export async function asrResumeRecord(){ + const result = await axios.get(apiURL.ASR_RESUME_RECORD); + return result +} \ No newline at end of file diff --git a/demos/speech_web/web_client/src/api/ApiNLP.js b/demos/speech_web/web_client/src/api/ApiNLP.js new file mode 100644 index 0000000000000000000000000000000000000000..92259054ac2d243ba3fa4bcd13bd06bf90cc4b8c --- /dev/null +++ b/demos/speech_web/web_client/src/api/ApiNLP.js @@ -0,0 +1,17 @@ +import axios from 'axios' +import {apiURL} from "./API.js" + +// get a chitchat reply +export async function nlpChat(text){ + const result = await axios.post(apiURL.NLP_CHAT, { chat: text }); + return result +} + +// get information-extraction results +export async function nlpIE(text){ + const result = await axios.post(apiURL.NLP_IE, { chat: text }); + return result +} + + + diff --git a/demos/speech_web/web_client/src/api/ApiTTS.js b/demos/speech_web/web_client/src/api/ApiTTS.js new file mode 100644 index 0000000000000000000000000000000000000000..1d23a4bd12efbe84a0fdd6340793e98ff321d830 --- /dev/null +++ b/demos/speech_web/web_client/src/api/ApiTTS.js @@ -0,0 +1,8 @@ +import axios from 'axios' +import {apiURL} from "./API.js" + +export async function ttsOffline(text){ + const result = await axios.post(apiURL.TTS_OFFLINE, { text: text }); + return result +} + diff --git a/demos/speech_web/web_client/src/api/ApiVPR.js b/demos/speech_web/web_client/src/api/ApiVPR.js new file mode 100644 index 0000000000000000000000000000000000000000..e3ae2f5ecb061640ddf811e0d3da4a856059f944 --- /dev/null +++ b/demos/speech_web/web_client/src/api/ApiVPR.js @@ -0,0 +1,32 @@ +import axios from 'axios' +import {apiURL} from "./API.js" + +// enroll a voiceprint +export async function vprEnroll(params){ + const result = await axios.post(apiURL.VPR_ENROLL, params); + return result +} + +// recognize a voiceprint +export async function vprRecog(params){ + const result = await axios.post(apiURL.VPR_RECOG, params); + return result +} + +// delete a voiceprint +export async function vprDel(params){ + const result = await axios.post(apiURL.VPR_DEL, params); + return result +} + +// list enrolled voiceprints +export async function vprList(){ + const result = await axios.get(apiURL.VPR_LIST); + return result +} + +// fetch enrolled voiceprint audio (base64) +export async function vprData(params){ + const result = await axios.get(apiURL.VPR_DATA + params); + return result +}
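
The wrappers above cover the HTTP endpoints; the three `*_SOCKET_RECORD` URLs in `API.js` are consumed directly over WebSocket from the components. Below is a minimal usage sketch, not part of the demo source: the helper names (`recognizeOnce`, `enrollSpeaker`, `openStreamingAsr`) and the form-field names (`files`, `spk_id`, `audio`) are assumptions for illustration; the actual request contract is defined by the FastAPI app in `speech_server/main.py`.

```javascript
import { asrOffline } from './ApiASR.js'
import { vprEnroll } from './ApiVPR.js'
import { apiURL } from './API.js'

// Offline ASR: POST a recorded Blob as multipart form data.
// NOTE: the field name "files" and the res.data shape are assumptions;
// check the FastAPI backend in speech_server for the real contract.
async function recognizeOnce(recordedBlob) {
  const form = new FormData()
  form.append('files', recordedBlob, 'sample.wav')
  const res = await asrOffline(form)
  return res.data
}

// Voiceprint enrollment: "spk_id" and the audio field name are likewise assumed.
async function enrollSpeaker(spkId, recordedBlob) {
  const form = new FormData()
  form.append('spk_id', spkId)
  form.append('audio', recordedBlob, 'enroll.wav')
  return vprEnroll(form)
}

// Streaming ASR uses the raw websocket URL, bypassing the /api proxy from
// vite.config.js (which is why API.js hard-codes the backend host and port).
function openStreamingAsr(onText) {
  const ws = new WebSocket(apiURL.ASR_SOCKET_RECORD)
  ws.onmessage = (event) => onText(event.data)
  ws.onerror = (err) => console.error('ASR socket error', err)
  return ws
}
```

diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\344\270\212\344\274\240\346\226\207\344\273\266.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\344\270\212\344\274\240\346\226\207\344\273\266.svg" new file mode 100644 index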
0000000000000000000000000000000000000000..4c3c86403037b5965634320a32be0f17747cbd3e --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\344\270\212\344\274\240\346\226\207\344\273\266.svg" @@ -0,0 +1,6 @@ + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\345\243\260\351\237\263\346\263\242\346\265\252.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\345\243\260\351\237\263\346\263\242\346\265\252.svg" new file mode 100644 index 0000000000000000000000000000000000000000..dfbdc0e85308714aa818735f97cb810efe764813 --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\345\243\260\351\237\263\346\263\242\346\265\252.svg" @@ -0,0 +1,6 @@ + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\350\257\255\351\237\263.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\350\257\255\351\237\263.svg" new file mode 100644 index 0000000000000000000000000000000000000000..54571a3e3b7c6e4832a88893284f50d9515e6794 --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\345\244\247-\350\257\255\351\237\263.svg" @@ -0,0 +1,6 @@ + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\345\260\217-\345\275\225\345\210\266\350\257\255\351\237\263.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\345\260\217-\345\275\225\345\210\266\350\257\255\351\237\263.svg" new file mode 100644 index 0000000000000000000000000000000000000000..b61f7ac035906663c54b967de19566c56189406e --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\345\260\217-\345\275\225\345\210\266\350\257\255\351\237\263.svg" @@ -0,0 +1,6 @@ + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\345\260\217-\347\273\223\346\235\237.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\345\260\217-\347\273\223\346\235\237.svg" new file mode 100644 index 0000000000000000000000000000000000000000..01a8dc65ea3d488cd1a2ceca4d6288e9005197de --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\345\260\217-\347\273\223\346\235\237.svg" @@ -0,0 +1,3 @@ + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\345\274\200\345\247\213\350\201\212\345\244\251.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\345\274\200\345\247\213\350\201\212\345\244\251.svg" new file mode 100644 index 0000000000000000000000000000000000000000..073efd5e04ba9d2e5d186a953c4365e9f955c343 --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\345\274\200\345\247\213\350\201\212\345\244\251.svg" @@ -0,0 +1,6 @@ + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\345\274\200\345\247\213\350\201\212\345\244\251_hover.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\345\274\200\345\247\213\350\201\212\345\244\251_hover.svg" new file mode 100644 index 0000000000000000000000000000000000000000..824f974ab939f78f08a425e7b0fe6b07cbcdf9ec --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\345\274\200\345\247\213\350\201\212\345\244\251_hover.svg" @@ -0,0 +1,6 @@ + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\346\222\255\346\224\276\357\274\210\346\214\211\351\222\256\357\274\211.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\346\222\255\346\224\276\357\274\210\346\214\211\351\222\256\357\274\211.svg" new file mode 100644 index 0000000000000000000000000000000000000000..4dc1461fd58c8646521381451bfa752a132c4433 --- 
/dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\346\222\255\346\224\276\357\274\210\346\214\211\351\222\256\357\274\211.svg" @@ -0,0 +1,3 @@ + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\346\232\202\345\201\234\357\274\210\346\214\211\351\222\256\357\274\211.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\346\232\202\345\201\234\357\274\210\346\214\211\351\222\256\357\274\211.svg" new file mode 100644 index 0000000000000000000000000000000000000000..6ede8ea62b1d7a111423ea5ef6fc7bffbfdcc2ae --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\346\232\202\345\201\234\357\274\210\346\214\211\351\222\256\357\274\211.svg" @@ -0,0 +1,3 @@ + + + diff --git "a/demos/speech_web/web_client/src/assets/image/ic_\346\233\264\346\215\242\347\244\272\344\276\213.svg" "b/demos/speech_web/web_client/src/assets/image/ic_\346\233\264\346\215\242\347\244\272\344\276\213.svg" new file mode 100644 index 0000000000000000000000000000000000000000..d126775d3045a109c9ef72ca35ef120caadcbf97 --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/ic_\346\233\264\346\215\242\347\244\272\344\276\213.svg" @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/icon_\345\260\217-\345\243\260\351\237\263\346\263\242\346\265\252.svg" "b/demos/speech_web/web_client/src/assets/image/icon_\345\260\217-\345\243\260\351\237\263\346\263\242\346\265\252.svg" new file mode 100644 index 0000000000000000000000000000000000000000..3dfed9be549821a04d57d991cd53128dc571d088 --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/icon_\345\260\217-\345\243\260\351\237\263\346\263\242\346\265\252.svg" @@ -0,0 +1,6 @@ + + + + + + diff --git "a/demos/speech_web/web_client/src/assets/image/icon_\345\275\225\345\210\266\345\243\260\351\237\263\345\260\217\350\257\255\351\237\2631.svg" "b/demos/speech_web/web_client/src/assets/image/icon_\345\275\225\345\210\266\345\243\260\351\237\263\345\260\217\350\257\255\351\237\2631.svg" new file mode 100644 index 0000000000000000000000000000000000000000..4fe4f0f7dbe66b9665f79bced8504c465bea4f6f --- /dev/null +++ "b/demos/speech_web/web_client/src/assets/image/icon_\345\275\225\345\210\266\345\243\260\351\237\263\345\260\217\350\257\255\351\237\2631.svg" @@ -0,0 +1,14 @@ + + + icon_录制声音(小语音) + + + + + + + + + + + \ No newline at end of file diff --git "a/demos/speech_web/web_client/src/assets/image/\345\234\250\347\272\277\344\275\223\351\252\214-\350\203\214\346\231\257@2x.png" "b/demos/speech_web/web_client/src/assets/image/\345\234\250\347\272\277\344\275\223\351\252\214-\350\203\214\346\231\257@2x.png" new file mode 100644 index 0000000000000000000000000000000000000000..66627e1e66a12ba4efa82135bb16962184c033a2 Binary files /dev/null and "b/demos/speech_web/web_client/src/assets/image/\345\234\250\347\272\277\344\275\223\351\252\214-\350\203\214\346\231\257@2x.png" differ diff --git "a/demos/speech_web/web_client/src/assets/image/\345\234\272\346\231\257\351\275\220\345\205\250@3x.png" "b/demos/speech_web/web_client/src/assets/image/\345\234\272\346\231\257\351\275\220\345\205\250@3x.png" new file mode 100644 index 0000000000000000000000000000000000000000..b85427a1af810553840ec72241d328dbd6220f4b Binary files /dev/null and "b/demos/speech_web/web_client/src/assets/image/\345\234\272\346\231\257\351\275\220\345\205\250@3x.png" differ diff --git "a/demos/speech_web/web_client/src/assets/image/\346\225\231\347\250\213\344\270\260\345\257\214@3x.png" 
"b/demos/speech_web/web_client/src/assets/image/\346\225\231\347\250\213\344\270\260\345\257\214@3x.png" new file mode 100644 index 0000000000000000000000000000000000000000..6edd64316cf80e651946189fb83ce5c7b61e4a4a Binary files /dev/null and "b/demos/speech_web/web_client/src/assets/image/\346\225\231\347\250\213\344\270\260\345\257\214@3x.png" differ diff --git "a/demos/speech_web/web_client/src/assets/image/\346\250\241\345\236\213\345\205\250\351\235\242@3x.png" "b/demos/speech_web/web_client/src/assets/image/\346\250\241\345\236\213\345\205\250\351\235\242@3x.png" new file mode 100644 index 0000000000000000000000000000000000000000..4d54eac05260e4ea5210433e0398e80ad15964d9 Binary files /dev/null and "b/demos/speech_web/web_client/src/assets/image/\346\250\241\345\236\213\345\205\250\351\235\242@3x.png" differ diff --git "a/demos/speech_web/web_client/src/assets/image/\346\255\245\351\252\244-\347\256\255\345\244\264\345\210\207\345\233\276@2x.png" "b/demos/speech_web/web_client/src/assets/image/\346\255\245\351\252\244-\347\256\255\345\244\264\345\210\207\345\233\276@2x.png" new file mode 100644 index 0000000000000000000000000000000000000000..d0cedecce1b6118fc5c29980b9a38435e112635c Binary files /dev/null and "b/demos/speech_web/web_client/src/assets/image/\346\255\245\351\252\244-\347\256\255\345\244\264\345\210\207\345\233\276@2x.png" differ diff --git "a/demos/speech_web/web_client/src/assets/image/\347\224\250\346\210\267\345\244\264\345\203\217@2x.png" "b/demos/speech_web/web_client/src/assets/image/\347\224\250\346\210\267\345\244\264\345\203\217@2x.png" new file mode 100644 index 0000000000000000000000000000000000000000..2970d00701849eb466ea9cc2396024059450f195 Binary files /dev/null and "b/demos/speech_web/web_client/src/assets/image/\347\224\250\346\210\267\345\244\264\345\203\217@2x.png" differ diff --git "a/demos/speech_web/web_client/src/assets/image/\351\243\236\346\241\250\345\244\264\345\203\217@2x.png" "b/demos/speech_web/web_client/src/assets/image/\351\243\236\346\241\250\345\244\264\345\203\217@2x.png" new file mode 100644 index 0000000000000000000000000000000000000000..1712170ed88a9d5e15834c33d2ba9872c6c2e5e3 Binary files /dev/null and "b/demos/speech_web/web_client/src/assets/image/\351\243\236\346\241\250\345\244\264\345\203\217@2x.png" differ diff --git a/demos/speech_web/web_client/src/assets/logo.png b/demos/speech_web/web_client/src/assets/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..f3d2503fc2a44b5053b0837ebea6e87a2d339a43 Binary files /dev/null and b/demos/speech_web/web_client/src/assets/logo.png differ diff --git a/demos/speech_web/web_client/src/components/Content/Header/Header.vue b/demos/speech_web/web_client/src/components/Content/Header/Header.vue new file mode 100644 index 0000000000000000000000000000000000000000..8135a2bffa47dd2c3f9cfc815b59bcdf7c65e6eb --- /dev/null +++ b/demos/speech_web/web_client/src/components/Content/Header/Header.vue @@ -0,0 +1,26 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/Content/Header/style.less b/demos/speech_web/web_client/src/components/Content/Header/style.less new file mode 100644 index 0000000000000000000000000000000000000000..9d0261378299786a89148eeedd13229a4abfc6dc --- /dev/null +++ b/demos/speech_web/web_client/src/components/Content/Header/style.less @@ -0,0 +1,148 @@ +.speech_header { + width: 1200px; + margin: 0 auto; + padding-top: 50px; + // background: url("../../../assets/image/在线体验-背景@2x.png") no-repeat; + box-sizing: 
border-box; + &::after { + content: ""; + display: block; + clear: both; + visibility: hidden; + } + + ; + + // background: pink; + .speech_header_title { + height: 57px; + font-family: PingFangSC-Medium; + font-size: 38px; + color: #000000; + letter-spacing: 0; + line-height: 57px; + font-weight: 500; + margin-bottom: 15px; + } + + ; + + .speech_header_describe { + height: 26px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #575757; + line-height: 26px; + font-weight: 400; + margin-bottom: 24px; + } + + ; + .speech_header_link_box { + height: 40px; + margin-bottom: 40px; + display: flex; + align-items: center; + }; + .speech_header_link { + display: block; + background: #2932E1; + width: 120px; + height: 40px; + line-height: 40px; + border-radius: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + text-align: center; + font-weight: 500; + margin-right: 20px; + // margin-bottom: 40px; + + &:hover { + opacity: 0.9; + } + + ; + } + + ; + + .speech_header_divider { + width: 1200px; + height: 1px; + background: #D1D1D1; + margin-bottom: 40px; + } + + ; + + .speech_header_content_wrapper { + width: 1200px; + margin: 0 auto; + // background: pink; + margin-bottom: 20px; + display: flex; + justify-content: space-between; + flex-wrap: wrap; + + .speech_header_module { + width: 384px; + background: #FFFFFF; + border: 1px solid rgba(224, 224, 224, 1); + box-shadow: 4px 8px 12px 0px rgba(0, 0, 0, 0.05); + border-radius: 16px; + padding: 30px 34px 0px 34px; + box-sizing: border-box; + display: flex; + margin-bottom: 40px; + .speech_header_background_img { + width: 46px; + height: 46px; + background-size: 46px 46px; + background-repeat: no-repeat; + background-position: center; + margin-right: 20px; + } + + ; + + .speech_header_content { + padding-top: 4px; + margin-bottom: 32px; + + .speech_header_module_title { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 20px; + color: #000000; + letter-spacing: 0; + line-height: 26px; + font-weight: 500; + margin-bottom: 10px; + } + + ; + + .speech_header_module_introduce { + font-family: PingFangSC-Regular; + font-size: 16px; + color: #666666; + letter-spacing: 0; + font-weight: 400; + } + + ; + } + + ; + } + + ; + } + + ; +} + +; + diff --git a/demos/speech_web/web_client/src/components/Content/Tail/Tail.vue b/demos/speech_web/web_client/src/components/Content/Tail/Tail.vue new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/demos/speech_web/web_client/src/components/Content/Tail/style.less b/demos/speech_web/web_client/src/components/Content/Tail/style.less new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/demos/speech_web/web_client/src/components/Experience.vue b/demos/speech_web/web_client/src/components/Experience.vue new file mode 100644 index 0000000000000000000000000000000000000000..5620d6af994e1f51abb4a833246a42e7fe8701bd --- /dev/null +++ b/demos/speech_web/web_client/src/components/Experience.vue @@ -0,0 +1,50 @@ + + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/ASR.vue b/demos/speech_web/web_client/src/components/SubMenu/ASR/ASR.vue new file mode 100644 index 0000000000000000000000000000000000000000..edef6a7877d3128e443ba4bc33a9fea1add1ce7a --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/ASR.vue @@ -0,0 +1,154 @@ + + + + + \ No newline at end of file diff --git 
a/demos/speech_web/web_client/src/components/SubMenu/ASR/ASRT.vue b/demos/speech_web/web_client/src/components/SubMenu/ASR/ASRT.vue new file mode 100644 index 0000000000000000000000000000000000000000..245fddb2c4bcdebcacb0a5da7e0605ad43cf6685 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/ASRT.vue @@ -0,0 +1,38 @@ + + + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/AudioFile/AudioFileIdentification.vue b/demos/speech_web/web_client/src/components/SubMenu/ASR/AudioFile/AudioFileIdentification.vue new file mode 100644 index 0000000000000000000000000000000000000000..4d3cf3c31e4c84cdc0b1252ee8201fd527c83675 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/AudioFile/AudioFileIdentification.vue @@ -0,0 +1,241 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/AudioFile/style.less b/demos/speech_web/web_client/src/components/SubMenu/ASR/AudioFile/style.less new file mode 100644 index 0000000000000000000000000000000000000000..46b33272d67f91fbb89f422091343fcbdc0f1cdd --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/AudioFile/style.less @@ -0,0 +1,293 @@ +.audioFileIdentification { + width: 1106px; + height: 270px; + // background-color: pink; + padding-top: 40px; + box-sizing: border-box; + display: flex; + // 开始上传 + .public_recognition_speech { + width: 295px; + height: 230px; + padding-top: 32px; + box-sizing: border-box; + // 开始上传 + .upload_img { + width: 116px; + height: 116px; + background: #2932E1; + border-radius: 50%; + margin-left: 98px; + cursor: pointer; + margin-bottom: 20px; + display: flex; + justify-content: center; + align-items: center; + .upload_img_back { + width: 34.38px; + height: 30.82px; + background: #2932E1; + background: url("../../../../assets/image/ic_大-上传文件.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 34.38px 30.82px; + cursor: pointer; + } + &:hover { + opacity: 0.9; + }; + + }; + + + .speech_text { + height: 22px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + font-weight: 500; + margin-left: 124px; + margin-bottom: 10px; + }; + .speech_text_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #999999; + font-weight: 400; + margin-left: 84px; + }; + }; + // 上传中 + .on_the_cross_speech { + width: 295px; + height: 230px; + padding-top: 32px; + box-sizing: border-box; + + .on_the_upload_img { + width: 116px; + height: 116px; + background: #7278F5; + border-radius: 50%; + margin-left: 98px; + cursor: pointer; + margin-bottom: 20px; + display: flex; + justify-content: center; + align-items: center; + + .on_the_upload_img_back { + width: 34.38px; + height: 30.82px; + background: #7278F5; + background: url("../../../../assets/image/ic_大-上传文件.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 34.38px 30.82px; + cursor: pointer; + + }; + }; + + + .on_the_speech_text { + height: 22px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + font-weight: 500; + margin-left: 124px; + margin-bottom: 10px; + display: flex; + // justify-content: center; + align-items: center; + .on_the_speech_loading { + display: inline-block; + width: 16px; + height: 16px; + background: #7278F5; + // background: url("../../../../assets/image/ic_开始聊天.svg"); + // background-repeat: no-repeat; + // background-position: center; + // background-size: 16px 
16px; + margin-right: 8px; + }; + }; + }; + + //开始识别 + .public_recognition_speech_start { + width: 295px; + height: 230px; + padding-top: 32px; + box-sizing: border-box; + position: relative; + .public_recognition_speech_content { + width: 100%; + position: absolute; + top: 40px; + left: 50%; + transform: translateX(-50%); + display: flex; + justify-content: center; + align-items: center; + + .public_recognition_speech_title { + height: 22px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #000000; + font-weight: 400; + }; + .public_recognition_speech_again { + height: 22px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #2932E1; + font-weight: 400; + margin-left: 30px; + cursor: pointer; + }; + .public_recognition_speech_play { + height: 22px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #2932E1; + font-weight: 400; + margin-left: 20px; + cursor: pointer; + }; + }; + .speech_promp { + position: absolute; + top: 112px; + left: 50%; + transform: translateX(-50%); + width: 142px; + height: 44px; + background: #2932E1; + border-radius: 22px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + text-align: center; + line-height: 44px; + font-weight: 500; + cursor: pointer; + }; + + + }; + // 识别中 + .public_recognition_speech_identify { + width: 295px; + height: 230px; + padding-top: 32px; + box-sizing: border-box; + position: relative; + .public_recognition_speech_identify_box { + width: 143px; + height: 44px; + background: #7278F5; + border-radius: 22px; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%,-50%); + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + .public_recognition_speech_identify_back_img { + width: 16px; + height: 16px; + // background: #7278F5; + // background: url("../../../../assets/image/ic_开始聊天.svg"); + // background-repeat: no-repeat; + // background-position: center; + // background-size: 16px 16px; + }; + .public_recognition__identify_the_promp { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + margin-left: 12px; + }; + }; + + + + }; + // 重新识别 + .public_recognition_speech_identify_ahain { + width: 295px; + height: 230px; + padding-top: 32px; + box-sizing: border-box; + position: relative; + cursor: pointer; + .public_recognition_speech_identify_box_btn { + width: 143px; + height: 44px; + background: #2932E1; + border-radius: 22px; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%,-50%); + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + .public_recognition__identify_the_btn { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + }; + }; + + + + }; + // 指向 + .public_recognition_point_to { + width: 47px; + height: 67px; + background: url("../../../../assets/image/步骤-箭头切图@2x.png") no-repeat; + background-position: center; + background-size: 47px 67px; + margin-top: 91px; + margin-right: 67px; + }; + // 识别结果 + .public_recognition_result { + width: 680px; + height: 230px; + background: #FAFAFA; + padding: 40px 50px 0px 50px; + div { + &:nth-of-type(1) { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #666666; + line-height: 26px; + font-weight: 500; + margin-bottom: 20px; + }; + &:nth-of-type(2) { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #666666; + line-height: 26px; + font-weight: 500; + }; + }; + }; +}; 
\ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/EndToEnd/EndToEndIdentification.vue b/demos/speech_web/web_client/src/components/SubMenu/ASR/EndToEnd/EndToEndIdentification.vue new file mode 100644 index 0000000000000000000000000000000000000000..651e8c725a687c1cf0fd91b15b9151ba20fdb2da --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/EndToEnd/EndToEndIdentification.vue @@ -0,0 +1,92 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/EndToEnd/style.less b/demos/speech_web/web_client/src/components/SubMenu/ASR/EndToEnd/style.less new file mode 100644 index 0000000000000000000000000000000000000000..1fc04b2c7d351872b378765fdfed87ba17d74c13 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/EndToEnd/style.less @@ -0,0 +1,114 @@ +.endToEndIdentification { + width: 1106px; + height: 270px; + // background-color: pink; + padding-top: 40px; + box-sizing: border-box; + display: flex; + // 开始识别 + .public_recognition_speech { + width: 295px; + height: 230px; + padding-top: 32px; + box-sizing: border-box; + + .endToEndIdentification_start_recorder_img { + width: 116px; + height: 116px; + background: #2932E1; + background: url("../../../../assets/image/ic_开始聊天.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 116px 116px; + margin-left: 98px; + cursor: pointer; + margin-bottom: 20px; + &:hover { + background: url("../../../../assets/image/ic_开始聊天_hover.svg"); + + }; + + }; + + .endToEndIdentification_end_recorder_img { + width: 116px; + height: 116px; + background: #2932E1; + border-radius: 50%; + display: flex; + justify-content: center; + align-items: center; + margin-left: 98px; + margin-bottom: 20px; + cursor: pointer; + .endToEndIdentification_end_recorder_img_back { + width: 50px; + height: 50px; + background: url("../../../../assets/image/ic_大-声音波浪.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 50px 50px; + + &:hover { + opacity: 0.9; + + }; + }; + + }; + .endToEndIdentification_prompt { + height: 22px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + font-weight: 500; + margin-left: 124px; + margin-bottom: 10px; + }; + .speech_text_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #999999; + font-weight: 400; + margin-left: 90px; + }; + }; + // 指向 + .public_recognition_point_to { + width: 47px; + height: 67px; + background: url("../../../../assets/image/步骤-箭头切图@2x.png") no-repeat; + background-position: center; + background-size: 47px 67px; + margin-top: 91px; + margin-right: 67px; + }; + // 识别结果 + .public_recognition_result { + width: 680px; + height: 230px; + background: #FAFAFA; + padding: 40px 50px 0px 50px; + div { + &:nth-of-type(1) { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #666666; + line-height: 26px; + font-weight: 500; + margin-bottom: 20px; + }; + &:nth-of-type(2) { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #666666; + line-height: 26px; + font-weight: 500; + }; + }; + }; + +}; \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/RealTime.vue b/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/RealTime.vue new file mode 100644 index 0000000000000000000000000000000000000000..761a5c11fa4187ac424e601c423b9f3feaddf7ad --- /dev/null +++ 
b/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/RealTime.vue @@ -0,0 +1,128 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/style.less b/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/style.less new file mode 100644 index 0000000000000000000000000000000000000000..baa89c5703eaba978861df1a93929a676630dd2f --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/style.less @@ -0,0 +1,112 @@ +.realTime{ + width: 1106px; + height: 270px; + // background-color: pink; + padding-top: 40px; + box-sizing: border-box; + display: flex; + // 开始识别 + .public_recognition_speech { + width: 295px; + height: 230px; + padding-top: 32px; + box-sizing: border-box; + .endToEndIdentification_start_recorder_img { + width: 116px; + height: 116px; + background: #2932E1; + background: url("../../../../assets/image/ic_开始聊天.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 116px 116px; + margin-left: 98px; + cursor: pointer; + margin-bottom: 20px; + &:hover { + background: url("../../../../assets/image/ic_开始聊天_hover.svg"); + + }; + + }; + + .endToEndIdentification_end_recorder_img { + width: 116px; + height: 116px; + background: #2932E1; + border-radius: 50%; + display: flex; + justify-content: center; + align-items: center; + margin-left: 98px; + margin-bottom: 20px; + cursor: pointer; + .endToEndIdentification_end_recorder_img_back { + width: 50px; + height: 50px; + background: url("../../../../assets/image/ic_大-声音波浪.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 50px 50px; + + &:hover { + opacity: 0.9; + + }; + }; + + }; + .endToEndIdentification_prompt { + height: 22px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + font-weight: 500; + margin-left: 124px; + margin-bottom: 10px; + }; + .speech_text_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #999999; + font-weight: 400; + margin-left: 105px; + }; + }; + // 指向 + .public_recognition_point_to { + width: 47px; + height: 67px; + background: url("../../../../assets/image/步骤-箭头切图@2x.png") no-repeat; + background-position: center; + background-size: 47px 67px; + margin-top: 91px; + margin-right: 67px; + }; + // 识别结果 + .public_recognition_result { + width: 680px; + height: 230px; + background: #FAFAFA; + padding: 40px 50px 0px 50px; + div { + &:nth-of-type(1) { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #666666; + line-height: 26px; + font-weight: 500; + margin-bottom: 20px; + }; + &:nth-of-type(2) { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #666666; + line-height: 26px; + font-weight: 500; + }; + }; + }; +}; \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/style.less b/demos/speech_web/web_client/src/components/SubMenu/ASR/style.less new file mode 100644 index 0000000000000000000000000000000000000000..92ce9340b20860dee10770ff7547799307cf02c3 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/style.less @@ -0,0 +1,76 @@ +.speech_recognition { + width: 1200px; + height: 410px; + background: #FFFFFF; + padding: 40px 50px 50px 44px; + position: relative; + .frame { + width: 605px; + height: 50px; + border: 1px solid rgba(238,238,238,1); + border-radius: 25px; + position: absolute; + } + .speech_recognition_mytabs { + .ant-tabs-tab { + position: relative; + 
display: inline-flex; + align-items: center; + // padding: 12px 0; + font-size: 14px; + background: transparent; + border: 0; + outline: none; + cursor: pointer; + padding: 12px 26px; + box-sizing: border-box; + } + .ant-tabs-tab-active { + height: 50px; + background: #EEEFFD; + border-radius: 25px; + padding: 12px 26px; + box-sizing: border-box; + }; + .speech_recognition .speech_recognition_mytabs .ant-tabs-ink-bar { + position: absolute; + background: transparent !important; + pointer-events: none; + } + .ant-tabs-ink-bar { + position: absolute; + background: transparent !important; + pointer-events: none; + } + .experience .experience_wrapper .experience_content .experience_tabs .ant-tabs-nav::before { + position: absolute; + right: 0; + left: 0; + border-bottom: 1px solid transparent !important; + // border: none; + content: ''; + } + .ant-tabs-top > .ant-tabs-nav::before, .ant-tabs-bottom > .ant-tabs-nav::before, .ant-tabs-top > div > .ant-tabs-nav::before, .ant-tabs-bottom > div > .ant-tabs-nav::before { + position: absolute; + right: 0; + left: 0; + border-bottom: 1px solid transparent !important; + // border: none; + content: ''; + } + .ant-tabs-top > .ant-tabs-nav::before, .ant-tabs-bottom > .ant-tabs-nav::before, .ant-tabs-top > div > .ant-tabs-nav::before, .ant-tabs-bottom > div > .ant-tabs-nav::before { + position: absolute; + right: 0; + left: 0; + border-bottom: 1px solid transparent !important; + content: ''; + } + .ant-tabs-nav::before { + position: absolute; + right: 0; + left: 0; + border-bottom: 1px solid transparent !important; + content: ''; + }; + }; +}; \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ChatBot/Chat.vue b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/Chat.vue new file mode 100644 index 0000000000000000000000000000000000000000..9d356fc80e932c2af3668a35eed783861b92bdef --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/Chat.vue @@ -0,0 +1,298 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ChatBot/ChatT.vue b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/ChatT.vue new file mode 100644 index 0000000000000000000000000000000000000000..c37c083ffdad713950394598e6f5f6ade17e49b9 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/ChatT.vue @@ -0,0 +1,255 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/ChatBot/style.less b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/style.less new file mode 100644 index 0000000000000000000000000000000000000000..d868fd47072a6d4a9e9f0ee399928c15ff5d81a0 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/style.less @@ -0,0 +1,181 @@ +.voice_chat { + width: 1200px; + height: 410px; + background: #FFFFFF; + position: relative; + // 开始聊天 + .voice_chat_wrapper { + top: 50%; + left: 50%; + transform: translate(-50%,-50%); + position: absolute; + .voice_chat_btn { + width: 116px; + height: 116px; + margin-left: 54px; + // background: #2932E1; + border-radius: 50%; + cursor: pointer; + background: url("../../../assets/image/ic_开始聊天.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 116px 116px; + margin-bottom: 17px; + &:hover { + width: 116px; + height: 116px; + background: url("../../../assets/image/ic_开始聊天_hover.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 116px 116px; + }; + + 
}; + .voice_chat_btn_title { + height: 22px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + letter-spacing: 0; + text-align: center; + line-height: 22px; + font-weight: 500; + margin-bottom: 10px; + }; + .voice_chat_btn_prompt { + height: 24px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #999999; + letter-spacing: 0; + text-align: center; + line-height: 24px; + font-weight: 400; + }; + }; + .voice_chat_wrapper::after { + content: ""; + display: block; + clear: both; + visibility: hidden; + }; + // 结束聊天 + .voice_chat_dialog_wrapper { + width: 1200px; + height: 410px; + background: #FFFFFF; + position: relative; + .dialog_box { + width: 100%; + height: 410px; + padding: 50px 198px 82px 199px; + box-sizing: border-box; + + .dialog_content { + width: 100%; + height: 268px; + // background: rgb(113, 144, 145); + padding: 0px; + overflow: auto; + li { + list-style-type: none; + margin-bottom: 33px; + display: flex; + align-items: center; + &:last-of-type(1) { + margin-bottom: 0px; + }; + .dialog_content_img_pp { + width: 60px; + height: 60px; + // transform: scaleX(-1); + background: url("../../../assets/image/飞桨头像@2x.png"); + background-repeat: no-repeat; + background-position: center; + background-size: 60px 60px; + margin-right: 20px; + }; + .dialog_content_img_user { + width: 60px; + height: 60px; + transform: scaleX(-1); + background: url("../../../assets/image/用户头像@2x.png"); + background-repeat: no-repeat; + background-position: center; + background-size: 60px 60px; + margin-left: 20px; + }; + .dialog_content_dialogue_pp { + height: 50px; + background: #F5F5F5; + border-radius: 25px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #000000; + line-height: 50px; + font-weight: 400; + padding: 0px 16px; + box-sizing: border-box; + }; + .dialog_content_dialogue_user { + height: 50px; + background: rgba(41,50,225,0.90); + border-radius: 25px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #FFFFFF; + line-height: 50px; + font-weight: 400; + padding: 0px 16px; + box-sizing: border-box; + }; + }; + }; + .move_dialogue { + justify-content: flex-end; + }; + + }; + + .btn_end_dialog { + width: 124px; + height: 42px; + line-height: 42px; + background: #FFFFFF; + box-shadow: 0px 4px 16px 0px rgba(0,0,0,0.09); + border-radius: 21px; + padding: 0px 24px; + box-sizing: border-box; + position: absolute; + left: 50%; + bottom: 40px; + transform: translateX(-50%); + display: flex; + justify-content: space-between; + align-items: center; + cursor: pointer; + span { + display: inline-block; + &:nth-of-type(1) { + width: 16px; + height: 16px; + background: url("../../../assets/image/ic_小-结束.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 16px 16px; + + }; + &:nth-of-type(2) { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #F33E3E; + text-align: center; + font-weight: 400; + line-height: 20px; + margin-left: 4px; + }; + }; + }; + }; +}; \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/IE/IE.vue b/demos/speech_web/web_client/src/components/SubMenu/IE/IE.vue new file mode 100644 index 0000000000000000000000000000000000000000..c7dd04e9dd0539fd48aece36f3038c31dacf3660 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/IE/IE.vue @@ -0,0 +1,125 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/IE/IET.vue 
b/demos/speech_web/web_client/src/components/SubMenu/IE/IET.vue new file mode 100644 index 0000000000000000000000000000000000000000..50eadec707b180b3254134a831ce396ee2e78885 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/IE/IET.vue @@ -0,0 +1,166 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/IE/style.less b/demos/speech_web/web_client/src/components/SubMenu/IE/style.less new file mode 100644 index 0000000000000000000000000000000000000000..988666a26e195a7ae9300b85875d1259f9075080 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/IE/style.less @@ -0,0 +1,179 @@ +.voice_commands { + width: 1200px; + height: 410px; + background: #FFFFFF; + padding: 40px 50px 50px 50px; + box-sizing: border-box; + display: flex; + // 交通报销 + .voice_commands_traffic { + width: 468px; + height: 320px; + .voice_commands_traffic_title { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + letter-spacing: 0; + line-height: 26px; + font-weight: 500; + margin-bottom: 30px; + // background: pink; + }; + .voice_commands_traffic_wrapper { + width: 465px; + height: 264px; + // background: #FAFAFA; + position: relative; + .voice_commands_traffic_wrapper_move { + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%,-50%); + }; + .traffic_btn_img_btn { + width: 116px; + height: 116px; + background: #2932E1; + display: flex; + justify-content: center; + align-items: center; + border-radius: 50%; + cursor: pointer; + margin-bottom: 20px; + margin-left: 84px; + &:hover { + width: 116px; + height: 116px; + background: #7278F5; + + .start_recorder_img{ + width: 50px; + height: 50px; + background: url("../../../assets/image/ic_开始聊天_hover.svg") no-repeat; + background-position: center; + background-size: 50px 50px; + }; + + }; + + .start_recorder_img{ + width: 50px; + height: 50px; + background: url("../../../assets/image/ic_开始聊天.svg") no-repeat; + background-position: center; + background-size: 50px 50px; + }; + + }; + .traffic_btn_prompt { + height: 22px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + font-weight: 500; + margin-bottom: 16px; + margin-left: 110px; + }; + .traffic_btn_list { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 12px; + color: #999999; + font-weight: 400; + width: 112%; + }; + }; + }; + //指向 + .voice_point_to { + width: 47px; + height: 63px; + background: url("../../../assets/image/步骤-箭头切图@2x.png") no-repeat; + background-position: center; + background-size: 47px 63px; + margin-top: 164px; + margin-right: 82px; + }; + //识别结果 + .voice_commands_IdentifyTheResults { + .voice_commands_IdentifyTheResults_title { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + line-height: 26px; + font-weight: 500; + margin-bottom: 30px; + }; + // 显示框 + .voice_commands_IdentifyTheResults_show { + width: 503px; + height: 264px; + background: #FAFAFA; + padding: 40px 0px 0px 50px; + box-sizing: border-box; + .voice_commands_IdentifyTheResults_show_title { + height: 22px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + // text-align: center; + font-weight: 500; + margin-bottom: 30px; + }; + .oice_commands_IdentifyTheResults_show_time { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #666666; + font-weight: 500; + margin-bottom: 12px; + }; + .oice_commands_IdentifyTheResults_show_money { + height: 20px; + font-family: PingFangSC-Medium; + 
font-size: 14px; + color: #666666; + font-weight: 500; + margin-bottom: 12px; + }; + .oice_commands_IdentifyTheResults_show_origin { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #666666; + font-weight: 500; + margin-bottom: 12px; + }; + .oice_commands_IdentifyTheResults_show_destination { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #666666; + font-weight: 500; + }; + }; + //加载状态 + .voice_commands_IdentifyTheResults_show_loading { + width: 503px; + height: 264px; + background: #FAFAFA; + padding: 40px 0px 0px 50px; + box-sizing: border-box; + display: flex; + justify-content: center; + align-items: center; + }; + }; + .end_recorder_img { + width: 50px; + height: 50px; + background: url("../../../assets/image/ic_大-声音波浪.svg") no-repeat; + background-position: center; + background-size: 50px 50px; + }; + .end_recorder_img:hover { + opacity: 0.9; + }; +}; \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/TTS/TTST.vue b/demos/speech_web/web_client/src/components/SubMenu/TTS/TTST.vue new file mode 100644 index 0000000000000000000000000000000000000000..353221f7bdc6830090f3e581896ac078d578cdd8 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/TTS/TTST.vue @@ -0,0 +1,359 @@ + + + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/TTS/style.less b/demos/speech_web/web_client/src/components/SubMenu/TTS/style.less new file mode 100644 index 0000000000000000000000000000000000000000..b5d189650276a09859a1c57984742f4e37a65219 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/TTS/style.less @@ -0,0 +1,369 @@ +.speech_recognition { + width: 1200px; + height: 410px; + background: #FFFFFF; + padding: 40px 0px 50px 50px; + box-sizing: border-box; + display: flex; + .recognition_text { + width: 589px; + height: 320px; + // background: pink; + .recognition_text_header { + margin-bottom: 30px; + display: flex; + justify-content: space-between; + align-items: center; + .recognition_text_title { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + letter-spacing: 0; + line-height: 26px; + font-weight: 500; + }; + .recognition_text_random { + display: flex; + align-items: center; + cursor: pointer; + span { + display: inline-block; + &:nth-of-type(1) { + width: 20px; + height: 20px; + background: url("../../../assets/image/ic_更换示例.svg") no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 5px; + + }; + &:nth-of-type(2) { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #2932E1; + letter-spacing: 0; + font-weight: 400; + }; + }; + }; + }; + .recognition_text_field { + width: 589px; + height: 264px; + background: #FAFAFA; + .textToSpeech_content_show_text{ + width: 100%; + height: 264px; + padding: 0px 30px 30px 0px; + box-sizing: border-box; + .ant-input { + height: 208px; + resize: none; + // margin-bottom: 230px; + padding: 21px 20px; + }; + }; + }; + }; + // 指向 + .recognition_point_to { + width: 47px; + height: 63px; + background: url("../../../assets/image/步骤-箭头切图@2x.png") no-repeat; + background-position: center; + background-size: 47px 63px; + margin-top: 164px; + margin-right: 101px; + margin-left: 100px; + margin-top: 164px; + }; + // 语音合成 + .speech_recognition_new { + .speech_recognition_title { + height: 26px; + font-family: PingFangSC-Medium; + font-size: 16px; + color: #000000; + line-height: 26px; + 
font-weight: 500; + margin-left: 32px; + margin-bottom: 96px; + }; + // 流式合成 + .speech_recognition_streaming { + width: 136px; + height: 44px; + background: #2932E1; + border-radius: 22px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + text-align: center; + line-height: 44px; + margin-bottom: 40px; + cursor: pointer; + &:hover { + opacity: .9; + }; + }; + // 合成中 + .streaming_ing_box { + display: flex; + align-items: center; + height: 44px; + margin-bottom: 40px; + .streaming_ing { + width: 136px; + height: 44px; + background: #7278F5; + border-radius: 22px; + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + + .streaming_ing_img { + width: 16px; + height: 16px; + // background: url("../../../assets/image/ic_小-录制语音.svg"); + // background-repeat: no-repeat; + // background-position: center; + // background-size: 16px 16px; + // margin-right: 12px; + }; + .streaming_ing_text { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + margin-left: 12px; + }; + }; + // 合成时间文字 + .streaming_time { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #000000; + font-weight: 500; + margin-left: 12px; + }; + }; + + + // 暂停播放 + .streaming_suspended_box { + display: flex; + align-items: center; + height: 44px; + margin-bottom: 40px; + .streaming_suspended { + width: 136px; + height: 44px; + background: #2932E1; + border-radius: 22px; + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + + .streaming_suspended_img { + width: 16px; + height: 16px; + background: url("../../../assets/image/ic_暂停(按钮).svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 16px 16px; + margin-right: 12px; + }; + .streaming_suspended_text { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + margin-left: 12px; + }; + + }; + // 暂停获取时间 + .suspended_time { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #000000; + font-weight: 500; + margin-left: 12px; + } + }; + + // 继续播放 + .streaming_continue { + width: 136px; + height: 44px; + background: #2932E1; + border-radius: 22px; + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + margin-bottom: 40px; + .streaming_continue_img { + width: 16px; + height: 16px; + background: url("../../../assets/image/ic_播放(按钮).svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 16px 16px; + margin-right: 12px; + }; + .streaming_continue_text { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + }; + }; + + + + + + + // 端到端合成 + .speech_recognition_end_to_end { + width: 136px; + height: 44px; + background: #2932E1; + border-radius: 22px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + text-align: center; + line-height: 44px; + cursor: pointer; + &:hover { + opacity: .9; + }; + }; + // 合成中 + .end_to_end_ing_box { + display: flex; + align-items: center; + height: 44px; + .end_to_end_ing { + width: 136px; + height: 44px; + background: #7278F5; + border-radius: 22px; + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + .end_to_end_ing_img { + width: 16px; + height: 16px; + // background: url("../../../assets/image/ic_小-录制语音.svg"); + // background-repeat: no-repeat; + // background-position: 
center; + // background-size: 16px 16px; + + }; + .end_to_end_ing_text { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + margin-left: 12px; + }; + }; + // 合成时间文本 + .end_to_end_ing_time { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #000000; + font-weight: 500; + margin-left: 12px; + }; + }; + + + // 暂停播放 + .end_to_end_suspended_box { + display: flex; + align-items: center; + height: 44px; + .end_to_end_suspended { + width: 136px; + height: 44px; + background: #2932E1; + border-radius: 22px; + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + .end_to_end_suspended_img { + width: 16px; + height: 16px; + background: url("../../../assets/image/ic_暂停(按钮).svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 16px 16px; + margin-right: 12px; + }; + .end_to_end_suspended_text { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + }; + }; + // 暂停播放时间 + .end_to_end_ing_suspended_time { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #000000; + font-weight: 500; + margin-left: 12px; + }; + }; + + // 继续播放 + .end_to_end_continue { + width: 136px; + height: 44px; + background: #2932E1; + border-radius: 22px; + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + .end_to_end_continue_img { + width: 16px; + height: 16px; + background: url("../../../assets/image/ic_播放(按钮).svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 16px 16px; + margin-right: 12px; + }; + .end_to_end_continue_text { + height: 20px; + font-family: PingFangSC-Medium; + font-size: 14px; + color: #FFFFFF; + font-weight: 500; + }; + }; + }; +}; \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/VPR/VPR.vue b/demos/speech_web/web_client/src/components/SubMenu/VPR/VPR.vue new file mode 100644 index 0000000000000000000000000000000000000000..1fe71e4d85d3e58c6641c559444c5abe1267ea6c --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/VPR/VPR.vue @@ -0,0 +1,178 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/VPR/VPRT.vue b/demos/speech_web/web_client/src/components/SubMenu/VPR/VPRT.vue new file mode 100644 index 0000000000000000000000000000000000000000..e398da00cdbc7b4a3fcdd35ec2d4a242bb768790 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/VPR/VPRT.vue @@ -0,0 +1,335 @@ + + + + + \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/SubMenu/VPR/style.less b/demos/speech_web/web_client/src/components/SubMenu/VPR/style.less new file mode 100644 index 0000000000000000000000000000000000000000..cb3df49ef2095ffd3039ad6599b0b9573aa19995 --- /dev/null +++ b/demos/speech_web/web_client/src/components/SubMenu/VPR/style.less @@ -0,0 +1,419 @@ +.voiceprint { + width: 1200px; + height: 410px; + background: #FFFFFF; + padding: 41px 80px 56px 80px; + box-sizing: border-box; + display: flex; + // 录制声纹 + .voiceprint_recording { + width: 423px; + height: 354px; + margin-right: 66px; + .recording_title { + display: flex; + align-items: center; + margin-bottom: 20px; + div { + &:nth-of-type(1) { + width: 24px; + height: 24px; + background: rgba(41,50,225,0.70); + font-family: PingFangSC-Regular; + font-size: 16px; + color: #FFFFFF; + letter-spacing: 0; + text-align: center; + 
line-height: 24px; + font-weight: 400; + margin-right: 16px; + border-radius: 50%; + }; + &:nth-of-type(2) { + height: 26px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #000000; + line-height: 26px; + font-weight: 400; + }; + }; + }; + // 开始录音 + .recording_btn { + width: 143px; + height: 44px; + cursor: pointer; + background: #2932E1; + padding: 0px 24px 0px 21px; + box-sizing: border-box; + border-radius: 22px; + display: flex; + align-items: center; + margin-bottom: 20px; + margin-top: 10px; + + &:hover { + background: #7278F5; + .recording_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_录制声音小语音1.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + } + .recording_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_录制声音小语音1.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + .recording_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 12px; + color: #FFFFFF; + font-weight: 400; + }; + + }; + // 录音中 + .recording_btn_the_recording { + width: 143px; + height: 44px; + cursor: pointer; + background: #7278F5; + padding: 0px 24px 0px 21px; + box-sizing: border-box; + border-radius: 22px; + display: flex; + align-items: center; + justify-content: center; + margin-bottom: 40px; + .recording_img_the_recording { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_小-声音波浪.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + }; + .recording_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 12px; + color: #FFFFFF; + font-weight: 400; + }; + }; + // 完成录音 + .complete_the_recording_btn { + width: 143px; + height: 44px; + cursor: pointer; + background: #2932E1; + padding: 0px 24px 0px 21px; + box-sizing: border-box; + border-radius: 22px; + display: flex; + align-items: center; + margin-bottom: 40px; + &:hover { + background: #7278F5; + .complete_the_recording_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_小-声音波浪.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + } + .complete_the_recording_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_小-声音波浪.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + .complete_the_recording_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 12px; + color: #FFFFFF; + font-weight: 400; + }; + + }; + // table + .recording_table { + width: 322px; + .recording_table_box { + .ant-table-thead > tr > th { + color: rgba(0, 0, 0, 0.85); + font-weight: 500; + text-align: left; + background: rgba(40,50,225,0.08); + border-bottom: none; + transition: background 0.3s ease; + height: 22px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #333333; + // text-align: center; + font-weight: 400; + &:nth-of-type(2) { + border-left: 2px solid white; + }; + }; + .ant-table-tbody > tr > td { + border-bottom: 1px solid #f0f0f0; + transition: background 0.3s; + height: 22px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #333333; + // text-align: center; + font-weight: 400; + }; + }; + }; + // input + .recording_input { + width: 322px; 
+ margin-bottom: 20px; + }; + }; + // 指向 + .recording_point_to { + width: 63px; + height: 47px; + background: url("../../../assets/image//步骤-箭头切图@2x.png"); + background-repeat: no-repeat; + background-position: center; + background-size: 63px 47px; + margin-right: 66px; + margin-top: 198px; + }; + //识别声纹 + .voiceprint_identify { + width: 423px; + height: 354px; + .identify_title { + display: flex; + align-items: center; + margin-bottom: 20px; + div { + &:nth-of-type(1) { + width: 24px; + height: 24px; + background: rgba(41,50,225,0.70); + font-family: PingFangSC-Regular; + font-size: 16px; + color: #FFFFFF; + letter-spacing: 0; + text-align: center; + line-height: 24px; + font-weight: 400; + margin-right: 16px; + border-radius: 50%; + }; + &:nth-of-type(2) { + height: 26px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #000000; + line-height: 26px; + font-weight: 400; + }; + }; + }; + // 开始识别 + .identify_btn { + width: 143px; + height: 44px; + cursor: pointer; + background: #2932E1; + padding: 0px 24px 0px 21px; + box-sizing: border-box; + border-radius: 22px; + display: flex; + align-items: center; + margin-bottom: 40px; + margin-top: 10px; + &:hover { + background: #7278F5; + .identify_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_录制声音小语音1.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + } + .identify_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_录制声音小语音1.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + .identify_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 12px; + color: #FFFFFF; + font-weight: 400; + }; + + }; + // 识别中 + .identify_btn_the_recording { + width: 143px; + height: 44px; + cursor: pointer; + background: #7278F5; + padding: 0px 24px 0px 21px; + box-sizing: border-box; + border-radius: 22px; + display: flex; + align-items: center; + justify-content: center; + margin-bottom: 40px; + .identify_img_the_recording { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_录制声音小语音1.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + }; + .recording_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 12px; + color: #FFFFFF; + font-weight: 400; + }; + }; + // 完成识别 + .identify_complete_the_recording_btn { + width: 143px; + height: 44px; + cursor: pointer; + background: #2932E1; + padding: 0px 24px 0px 21px; + box-sizing: border-box; + border-radius: 22px; + display: flex; + align-items: center; + margin-bottom: 40px; + &:hover { + background: #7278F5; + .identify_complete_the_recording_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_小-声音波浪.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + } + .identify_complete_the_recording_img { + width: 20px; + height: 20px; + background: url("../../../assets/image//icon_小-声音波浪.svg"); + background-repeat: no-repeat; + background-position: center; + background-size: 20px 20px; + margin-right: 8.26px; + + }; + .identify_complete_the_recording_prompt { + height: 20px; + font-family: PingFangSC-Regular; + font-size: 12px; + color: #FFFFFF; + font-weight: 400; + }; + + }; + + + + + // 结果 + .identify_result { + width: 422px; + height: 184px; 
+ text-align: center; + line-height: 184px; + background: #FAFAFA; + position: relative; + .identify_result_default { + + font-family: PingFangSC-Regular; + font-size: 16px; + color: #999999; + font-weight: 400; + }; + .identify_result_content { + // text-align: center; + // position: absolute; + // top: 50%; + // left: 50%; + // transform: translate(-50%,-50%); + div { + &:nth-of-type(1) { + height: 22px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #666666; + font-weight: 400; + margin-bottom: 10px; + }; + &:nth-of-type(2) { + height: 33px; + font-family: PingFangSC-Medium; + font-size: 24px; + color: #000000; + font-weight: 500; + }; + }; + }; + }; + }; + .action_btn { + display: inline-block; + height: 22px; + font-family: PingFangSC-Regular; + font-size: 16px; + color: #2932E1; + text-align: center; + font-weight: 400; + cursor: pointer; + }; +}; \ No newline at end of file diff --git a/demos/speech_web/web_client/src/components/style.less b/demos/speech_web/web_client/src/components/style.less new file mode 100644 index 0000000000000000000000000000000000000000..98f414f1c52c6d509f24d08f13d0cd237256387f --- /dev/null +++ b/demos/speech_web/web_client/src/components/style.less @@ -0,0 +1,83 @@ +.experience { + width: 100%; + height: 709px; + // background: url("../assets/image/在线体验-背景@2x.png") no-repeat; + background-size: 100% 709px; + background-position: initial; + // + .experience_wrapper { + width: 1200px; + height: 709px; + margin: 0 auto; + padding: 0px 0px 0px 0px; + box-sizing: border-box; + // background: red; + .experience_title { + height: 42px; + font-family: PingFangSC-Semibold; + font-size: 30px; + color: #000000; + font-weight: 600; + line-height: 42px; + text-align: center; + margin-bottom: 10px; + }; + .experience_describe { + height: 22px; + font-family: PingFangSC-Regular; + font-size: 14px; + color: #666666; + letter-spacing: 0; + text-align: center; + line-height: 22px; + font-weight: 400; + margin-bottom: 30px; + }; + .experience_content { + width: 1200px; + margin: 0 auto; + display: flex; + justify-content: center; + .experience_tabs { + + margin-top: 15px; + + & > .ant-tabs-nav { + margin-bottom: 20px; + + &::before { + content: none; + } + + .ant-tabs-nav-wrap { + justify-content: center; + } + + .ant-tabs-tab { + font-size: 20px; + } + + .ant-tabs-nav-list { + margin-right: -32px; + flex: none; + } + }; + + .ant-tabs-nav::before { + position: absolute; + right: 0; + left: 0; + border-bottom: 1px solid #f6f7fe; + content: ''; + }; + + }; + }; + }; +}; +.experience::after { + content: ""; + display: block; + clear: both; + visibility: hidden; +} \ No newline at end of file diff --git a/demos/speech_web/web_client/src/main.js b/demos/speech_web/web_client/src/main.js new file mode 100644 index 0000000000000000000000000000000000000000..3fbf87c85ae8008ce647cdb1ccaafa987289d4b4 --- /dev/null +++ b/demos/speech_web/web_client/src/main.js @@ -0,0 +1,13 @@ +import { createApp } from 'vue' +import ElementPlus from 'element-plus' +import 'element-plus/dist/index.css' +import Antd from 'ant-design-vue'; +import 'ant-design-vue/dist/antd.css'; +import App from './App.vue' +import axios from 'axios' + +const app = createApp(App) +app.config.globalProperties.$http = axios + +app.use(ElementPlus).use(Antd) +app.mount('#app') diff --git a/demos/speech_web/web_client/vite.config.js b/demos/speech_web/web_client/vite.config.js new file mode 100644 index 0000000000000000000000000000000000000000..dc7e6978c0196f5c54f4aa994c77aee643297e47 --- /dev/null +++ 
b/demos/speech_web/web_client/vite.config.js @@ -0,0 +1,28 @@ +import { defineConfig } from 'vite' +import vue from '@vitejs/plugin-vue' + +// https://vitejs.dev/config/ +export default defineConfig({ + plugins: [vue()], + css: + { preprocessorOptions: + { css: + { + charset: false + } + } + }, + build: { + assetsInlineLimit: '2048' // 2kb + }, + server: { + host: "0.0.0.0", + proxy: { + "/api": { + target: "http://localhost:8010", + changeOrigin: true, + rewrite: (path) => path.replace(/^\/api/, ""), + }, + }, + }, +}) diff --git a/demos/speech_web/web_client/yarn.lock b/demos/speech_web/web_client/yarn.lock new file mode 100644 index 0000000000000000000000000000000000000000..4504eab36a3b74e4a482e7f96c7b93c3457385a0 --- /dev/null +++ b/demos/speech_web/web_client/yarn.lock @@ -0,0 +1,785 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@ant-design/colors@^6.0.0": + version "6.0.0" + resolved "https://registry.npmmirror.com/@ant-design/colors/-/colors-6.0.0.tgz" + integrity sha512-qAZRvPzfdWHtfameEGP2Qvuf838NhergR35o+EuVyB5XvSA98xod5r4utvi4TJ3ywmevm290g9nsCG5MryrdWQ== + dependencies: + "@ctrl/tinycolor" "^3.4.0" + +"@ant-design/icons-svg@^4.2.1": + version "4.2.1" + resolved "https://registry.npmmirror.com/@ant-design/icons-svg/-/icons-svg-4.2.1.tgz" + integrity sha512-EB0iwlKDGpG93hW8f85CTJTs4SvMX7tt5ceupvhALp1IF44SeUFOMhKUOYqpsoYWQKAOuTRDMqn75rEaKDp0Xw== + +"@ant-design/icons-vue@^6.0.0": + version "6.1.0" + resolved "https://registry.npmmirror.com/@ant-design/icons-vue/-/icons-vue-6.1.0.tgz" + integrity sha512-EX6bYm56V+ZrKN7+3MT/ubDkvJ5rK/O2t380WFRflDcVFgsvl3NLH7Wxeau6R8DbrO5jWR6DSTC3B6gYFp77AA== + dependencies: + "@ant-design/colors" "^6.0.0" + "@ant-design/icons-svg" "^4.2.1" + +"@babel/parser@^7.16.4": + version "7.17.9" + resolved "https://registry.npmmirror.com/@babel/parser/-/parser-7.17.9.tgz" + integrity sha512-vqUSBLP8dQHFPdPi9bc5GK9vRkYHJ49fsZdtoJ8EQ8ibpwk5rPKfvNIwChB0KVXcIjcepEBBd2VHC5r9Gy8ueg== + +"@babel/runtime@^7.10.5": + version "7.17.9" + resolved "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.17.9.tgz" + integrity sha512-lSiBBvodq29uShpWGNbgFdKYNiFDo5/HIYsaCEY9ff4sb10x9jizo2+pRrSyF4jKZCXqgzuqBOQKbUm90gQwJg== + dependencies: + regenerator-runtime "^0.13.4" + +"@ctrl/tinycolor@^3.4.0": + version "3.4.1" + resolved "https://registry.npmmirror.com/@ctrl/tinycolor/-/tinycolor-3.4.1.tgz" + integrity sha512-ej5oVy6lykXsvieQtqZxCOaLT+xD4+QNarq78cIYISHmZXshCvROLudpQN3lfL8G0NL7plMSSK+zlyvCaIJ4Iw== + +"@element-plus/icons-vue@^1.1.4": + version "1.1.4" + resolved "https://registry.npmmirror.com/@element-plus/icons-vue/-/icons-vue-1.1.4.tgz" + integrity sha512-Iz/nHqdp1sFPmdzRwHkEQQA3lKvoObk8azgABZ81QUOpW9s/lUyQVUSh0tNtEPZXQlKwlSh7SPgoVxzrE0uuVQ== + +"@floating-ui/core@^0.6.1": + version "0.6.1" + resolved "https://registry.npmmirror.com/@floating-ui/core/-/core-0.6.1.tgz" + integrity sha512-Y30eVMcZva8o84c0HcXAtDO4BEzPJMvF6+B7x7urL2xbAqVsGJhojOyHLaoQHQYjb6OkqRq5kO+zeySycQwKqg== + +"@floating-ui/dom@^0.4.2": + version "0.4.4" + resolved "https://registry.npmmirror.com/@floating-ui/dom/-/dom-0.4.4.tgz" + integrity sha512-0Ulu3B/dqQplUUSqnTx0foSrlYuMN+GTtlJWvNJwt6Fr7/PqmlR/Y08o6/+bxDWr6p3roBJRaQ51MDZsNmEhhw== + dependencies: + "@floating-ui/core" "^0.6.1" + +"@popperjs/core@^2.11.4": + version "2.11.5" + resolved "https://registry.npmmirror.com/@popperjs/core/-/core-2.11.5.tgz" + integrity sha512-9X2obfABZuDVLCgPK9aX0a/x4jaOEweTTWE2+9sr0Qqqevj2Uv5XorvusThmc9XGYpS9yI+fhh8RTafBtGposw== + +"@simonwep/pickr@~1.8.0": + version 
"1.8.2" + resolved "https://registry.npmmirror.com/@simonwep/pickr/-/pickr-1.8.2.tgz" + integrity sha512-/l5w8BIkrpP6n1xsetx9MWPWlU6OblN5YgZZphxan0Tq4BByTCETL6lyIeY8lagalS2Nbt4F2W034KHLIiunKA== + dependencies: + core-js "^3.15.1" + nanopop "^2.1.0" + +"@types/lodash-es@^4.17.6": + version "4.17.6" + resolved "https://registry.npmmirror.com/@types/lodash-es/-/lodash-es-4.17.6.tgz" + integrity sha512-R+zTeVUKDdfoRxpAryaQNRKk3105Rrgx2CFRClIgRGaqDTdjsm8h6IYA8ir584W3ePzkZfst5xIgDwYrlh9HLg== + dependencies: + "@types/lodash" "*" + +"@types/lodash@*", "@types/lodash@^4.14.181": + version "4.14.181" + resolved "https://registry.npmmirror.com/@types/lodash/-/lodash-4.14.181.tgz" + integrity sha512-n3tyKthHJbkiWhDZs3DkhkCzt2MexYHXlX0td5iMplyfwketaOeKboEVBqzceH7juqvEg3q5oUoBFxSLu7zFag== + +"@vitejs/plugin-vue@^2.3.0": + version "2.3.1" + resolved "https://registry.npmmirror.com/@vitejs/plugin-vue/-/plugin-vue-2.3.1.tgz" + integrity sha512-YNzBt8+jt6bSwpt7LP890U1UcTOIZZxfpE5WOJ638PNxSEKOqAi0+FSKS0nVeukfdZ0Ai/H7AFd6k3hayfGZqQ== + +"@vue/compiler-core@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/compiler-core/-/compiler-core-3.2.32.tgz" + integrity sha512-bRQ8Rkpm/aYFElDWtKkTPHeLnX5pEkNxhPUcqu5crEJIilZH0yeFu/qUAcV4VfSE2AudNPkQSOwMZofhnuutmA== + dependencies: + "@babel/parser" "^7.16.4" + "@vue/shared" "3.2.32" + estree-walker "^2.0.2" + source-map "^0.6.1" + +"@vue/compiler-dom@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/compiler-dom/-/compiler-dom-3.2.32.tgz" + integrity sha512-maa3PNB/NxR17h2hDQfcmS02o1f9r9QIpN1y6fe8tWPrS1E4+q8MqrvDDQNhYVPd84rc3ybtyumrgm9D5Rf/kg== + dependencies: + "@vue/compiler-core" "3.2.32" + "@vue/shared" "3.2.32" + +"@vue/compiler-sfc@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/compiler-sfc/-/compiler-sfc-3.2.32.tgz" + integrity sha512-uO6+Gh3AVdWm72lRRCjMr8nMOEqc6ezT9lWs5dPzh1E9TNaJkMYPaRtdY9flUv/fyVQotkfjY/ponjfR+trPSg== + dependencies: + "@babel/parser" "^7.16.4" + "@vue/compiler-core" "3.2.32" + "@vue/compiler-dom" "3.2.32" + "@vue/compiler-ssr" "3.2.32" + "@vue/reactivity-transform" "3.2.32" + "@vue/shared" "3.2.32" + estree-walker "^2.0.2" + magic-string "^0.25.7" + postcss "^8.1.10" + source-map "^0.6.1" + +"@vue/compiler-ssr@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/compiler-ssr/-/compiler-ssr-3.2.32.tgz" + integrity sha512-ZklVUF/SgTx6yrDUkaTaBL/JMVOtSocP+z5Xz/qIqqLdW/hWL90P+ob/jOQ0Xc/om57892Q7sRSrex0wujOL2Q== + dependencies: + "@vue/compiler-dom" "3.2.32" + "@vue/shared" "3.2.32" + +"@vue/reactivity-transform@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/reactivity-transform/-/reactivity-transform-3.2.32.tgz" + integrity sha512-CW1W9zaJtE275tZSWIfQKiPG0iHpdtSlmTqYBu7Y62qvtMgKG5yOxtvBs4RlrZHlaqFSE26avLAgQiTp4YHozw== + dependencies: + "@babel/parser" "^7.16.4" + "@vue/compiler-core" "3.2.32" + "@vue/shared" "3.2.32" + estree-walker "^2.0.2" + magic-string "^0.25.7" + +"@vue/reactivity@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/reactivity/-/reactivity-3.2.32.tgz" + integrity sha512-4zaDumuyDqkuhbb63hRd+YHFGopW7srFIWesLUQ2su/rJfWrSq3YUvoKAJE8Eu1EhZ2Q4c1NuwnEreKj1FkDxA== + dependencies: + "@vue/shared" "3.2.32" + +"@vue/runtime-core@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/runtime-core/-/runtime-core-3.2.32.tgz" + integrity sha512-uKKzK6LaCnbCJ7rcHvsK0azHLGpqs+Vi9B28CV1mfWVq1F3Bj8Okk3cX+5DtD06aUh4V2bYhS2UjjWiUUKUF0w== + dependencies: + 
"@vue/reactivity" "3.2.32" + "@vue/shared" "3.2.32" + +"@vue/runtime-dom@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/runtime-dom/-/runtime-dom-3.2.32.tgz" + integrity sha512-AmlIg+GPqjkNoADLjHojEX5RGcAg+TsgXOOcUrtDHwKvA8mO26EnLQLB8nylDjU6AMJh2CIYn8NEgyOV5ZIScQ== + dependencies: + "@vue/runtime-core" "3.2.32" + "@vue/shared" "3.2.32" + csstype "^2.6.8" + +"@vue/server-renderer@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/server-renderer/-/server-renderer-3.2.32.tgz" + integrity sha512-TYKpZZfRJpGTTiy/s6bVYwQJpAUx3G03z4G7/3O18M11oacrMTVHaHjiPuPqf3xQtY8R4LKmQ3EOT/DRCA/7Wg== + dependencies: + "@vue/compiler-ssr" "3.2.32" + "@vue/shared" "3.2.32" + +"@vue/shared@3.2.32": + version "3.2.32" + resolved "https://registry.npmmirror.com/@vue/shared/-/shared-3.2.32.tgz" + integrity sha512-bjcixPErUsAnTQRQX4Z5IQnICYjIfNCyCl8p29v1M6kfVzvwOICPw+dz48nNuWlTOOx2RHhzHdazJibE8GSnsw== + +"@vueuse/core@^8.2.4": + version "8.2.5" + resolved "https://registry.npmmirror.com/@vueuse/core/-/core-8.2.5.tgz" + integrity sha512-5prZAA1Ji2ltwNUnzreu6WIXYqHYP/9U2BiY5mD/650VYLpVcwVlYznJDFcLCmEWI3o3Vd34oS1FUf+6Mh68GQ== + dependencies: + "@vueuse/metadata" "8.2.5" + "@vueuse/shared" "8.2.5" + vue-demi "*" + +"@vueuse/metadata@8.2.5": + version "8.2.5" + resolved "https://registry.npmmirror.com/@vueuse/metadata/-/metadata-8.2.5.tgz" + integrity sha512-Lk9plJjh9cIdiRdcj16dau+2LANxIdFCiTgdfzwYXbflxq0QnMBeOD2qHgKDE7fuVrtPcVWj8VSuZEx1HRfNQA== + +"@vueuse/shared@8.2.5": + version "8.2.5" + resolved "https://registry.npmmirror.com/@vueuse/shared/-/shared-8.2.5.tgz" + integrity sha512-lNWo+7sk6JCuOj4AiYM+6HZ6fq4xAuVq1sVckMQKgfCJZpZRe4i8es+ZULO5bYTKP+VrOCtqrLR2GzEfrbr3YQ== + dependencies: + vue-demi "*" + +ant-design-vue@^2.2.8: + version "2.2.8" + resolved "https://registry.npmmirror.com/ant-design-vue/-/ant-design-vue-2.2.8.tgz" + integrity sha512-3graq9/gCfJQs6hznrHV6sa9oDmk/D1H3Oo0vLdVpPS/I61fZPk8NEyNKCHpNA6fT2cx6xx9U3QS63uuyikg/Q== + dependencies: + "@ant-design/icons-vue" "^6.0.0" + "@babel/runtime" "^7.10.5" + "@simonwep/pickr" "~1.8.0" + array-tree-filter "^2.1.0" + async-validator "^3.3.0" + dom-align "^1.12.1" + dom-scroll-into-view "^2.0.0" + lodash "^4.17.21" + lodash-es "^4.17.15" + moment "^2.27.0" + omit.js "^2.0.0" + resize-observer-polyfill "^1.5.1" + scroll-into-view-if-needed "^2.2.25" + shallow-equal "^1.0.0" + vue-types "^3.0.0" + warning "^4.0.0" + +array-tree-filter@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz" + integrity sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw== + +async-validator@^3.3.0: + version "3.5.2" + resolved "https://registry.npmmirror.com/async-validator/-/async-validator-3.5.2.tgz" + integrity sha512-8eLCg00W9pIRZSB781UUX/H6Oskmm8xloZfr09lz5bikRpBVDlJ3hRVuxxP1SxcwsEYfJ4IU8Q19Y8/893r3rQ== + +async-validator@^4.0.7: + version "4.0.7" + resolved "https://registry.npmmirror.com/async-validator/-/async-validator-4.0.7.tgz" + integrity sha512-Pj2IR7u8hmUEDOwB++su6baaRi+QvsgajuFB9j95foM1N2gy5HM4z60hfusIO0fBPG5uLAEl6yCJr1jNSVugEQ== + +axios@^0.26.1: + version "0.26.1" + resolved "https://registry.npmmirror.com/axios/-/axios-0.26.1.tgz" + integrity sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA== + dependencies: + follow-redirects "^1.14.8" + +compute-scroll-into-view@^1.0.17: + version "1.0.17" + resolved 
"https://registry.npmmirror.com/compute-scroll-into-view/-/compute-scroll-into-view-1.0.17.tgz" + integrity sha512-j4dx+Fb0URmzbwwMUrhqWM2BEWHdFGx+qZ9qqASHRPqvTYdqvWnHg0H1hIbcyLnvgnoNAVMlwkepyqM3DaIFUg== + +copy-anything@^2.0.1: + version "2.0.6" + resolved "https://registry.npmmirror.com/copy-anything/-/copy-anything-2.0.6.tgz" + integrity sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw== + dependencies: + is-what "^3.14.1" + +core-js@^3.15.1: + version "3.22.5" + resolved "https://registry.npmmirror.com/core-js/-/core-js-3.22.5.tgz" + integrity sha512-VP/xYuvJ0MJWRAobcmQ8F2H6Bsn+s7zqAAjFaHGBMc5AQm7zaelhD1LGduFn2EehEcQcU+br6t+fwbpQ5d1ZWA== + +csstype@^2.6.8: + version "2.6.20" + resolved "https://registry.npmmirror.com/csstype/-/csstype-2.6.20.tgz" + integrity sha512-/WwNkdXfckNgw6S5R125rrW8ez139lBHWouiBvX8dfMFtcn6V81REDqnH7+CRpRipfYlyU1CmOnOxrmGcFOjeA== + +dayjs@^1.11.0: + version "1.11.0" + resolved "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.0.tgz" + integrity sha512-JLC809s6Y948/FuCZPm5IX8rRhQwOiyMb2TfVVQEixG7P8Lm/gt5S7yoQZmC8x1UehI9Pb7sksEt4xx14m+7Ug== + +debug@^3.2.6: + version "3.2.7" + resolved "https://registry.npmmirror.com/debug/-/debug-3.2.7.tgz" + integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== + dependencies: + ms "^2.1.1" + +dom-align@^1.12.1: + version "1.12.3" + resolved "https://registry.npmmirror.com/dom-align/-/dom-align-1.12.3.tgz" + integrity sha512-Gj9hZN3a07cbR6zviMUBOMPdWxYhbMI+x+WS0NAIu2zFZmbK8ys9R79g+iG9qLnlCwpFoaB+fKy8Pdv470GsPA== + +dom-scroll-into-view@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/dom-scroll-into-view/-/dom-scroll-into-view-2.0.1.tgz" + integrity sha512-bvVTQe1lfaUr1oFzZX80ce9KLDlZ3iU+XGNE/bz9HnGdklTieqsbmsLHe+rT2XWqopvL0PckkYqN7ksmm5pe3w== + +element-plus@^2.1.9: + version "2.1.9" + resolved "https://registry.npmmirror.com/element-plus/-/element-plus-2.1.9.tgz" + integrity sha512-6mWqS3YrmJPnouWP4otzL8+MehfOnDFqDbcIdnmC07p+Z0JkWe/CVKc4Wky8AYC8nyDMUQyiZYvooCbqGuM7pg== + dependencies: + "@ctrl/tinycolor" "^3.4.0" + "@element-plus/icons-vue" "^1.1.4" + "@floating-ui/dom" "^0.4.2" + "@popperjs/core" "^2.11.4" + "@types/lodash" "^4.14.181" + "@types/lodash-es" "^4.17.6" + "@vueuse/core" "^8.2.4" + async-validator "^4.0.7" + dayjs "^1.11.0" + escape-html "^1.0.3" + lodash "^4.17.21" + lodash-es "^4.17.21" + lodash-unified "^1.0.2" + memoize-one "^6.0.0" + normalize-wheel-es "^1.1.2" + +errno@^0.1.1: + version "0.1.8" + resolved "https://registry.npmmirror.com/errno/-/errno-0.1.8.tgz" + integrity sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A== + dependencies: + prr "~1.0.1" + +esbuild-android-64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-android-64/-/esbuild-android-64-0.14.36.tgz#fc5f95ce78c8c3d790fa16bc71bd904f2bb42aa1" + integrity sha512-jwpBhF1jmo0tVCYC/ORzVN+hyVcNZUWuozGcLHfod0RJCedTDTvR4nwlTXdx1gtncDqjk33itjO+27OZHbiavw== + +esbuild-android-arm64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-android-arm64/-/esbuild-android-arm64-0.14.36.tgz#44356fbb9f8de82a5cdf11849e011dfb3ad0a8a8" + integrity sha512-/hYkyFe7x7Yapmfv4X/tBmyKnggUmdQmlvZ8ZlBnV4+PjisrEhAvC3yWpURuD9XoB8Wa1d5dGkTsF53pIvpjsg== + +esbuild-darwin-64@0.14.36: + version "0.14.36" + resolved "https://registry.npmmirror.com/esbuild-darwin-64/-/esbuild-darwin-64-0.14.36.tgz" + integrity 
sha512-kkl6qmV0dTpyIMKagluzYqlc1vO0ecgpviK/7jwPbRDEv5fejRTaBBEE2KxEQbTHcLhiiDbhG7d5UybZWo/1zQ== + +esbuild-darwin-arm64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-darwin-arm64/-/esbuild-darwin-arm64-0.14.36.tgz#2a8040c2e465131e5281034f3c72405e643cb7b2" + integrity sha512-q8fY4r2Sx6P0Pr3VUm//eFYKVk07C5MHcEinU1BjyFnuYz4IxR/03uBbDwluR6ILIHnZTE7AkTUWIdidRi1Jjw== + +esbuild-freebsd-64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-freebsd-64/-/esbuild-freebsd-64-0.14.36.tgz#d82c387b4d01fe9e8631f97d41eb54f2dbeb68a3" + integrity sha512-Hn8AYuxXXRptybPqoMkga4HRFE7/XmhtlQjXFHoAIhKUPPMeJH35GYEUWGbjteai9FLFvBAjEAlwEtSGxnqWww== + +esbuild-freebsd-arm64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-freebsd-arm64/-/esbuild-freebsd-arm64-0.14.36.tgz#e8ce2e6c697da6c7ecd0cc0ac821d47c5ab68529" + integrity sha512-S3C0attylLLRiCcHiJd036eDEMOY32+h8P+jJ3kTcfhJANNjP0TNBNL30TZmEdOSx/820HJFgRrqpNAvTbjnDA== + +esbuild-linux-32@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-32/-/esbuild-linux-32-0.14.36.tgz#a4a261e2af91986ea62451f2db712a556cb38a15" + integrity sha512-Eh9OkyTrEZn9WGO4xkI3OPPpUX7p/3QYvdG0lL4rfr73Ap2HAr6D9lP59VMF64Ex01LhHSXwIsFG/8AQjh6eNw== + +esbuild-linux-64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-64/-/esbuild-linux-64-0.14.36.tgz#4a9500f9197e2c8fcb884a511d2c9d4c2debde72" + integrity sha512-vFVFS5ve7PuwlfgoWNyRccGDi2QTNkQo/2k5U5ttVD0jRFaMlc8UQee708fOZA6zTCDy5RWsT5MJw3sl2X6KDg== + +esbuild-linux-arm64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-arm64/-/esbuild-linux-arm64-0.14.36.tgz#c91c21e25b315464bd7da867365dd1dae14ca176" + integrity sha512-24Vq1M7FdpSmaTYuu1w0Hdhiqkbto1I5Pjyi+4Cdw5fJKGlwQuw+hWynTcRI/cOZxBcBpP21gND7W27gHAiftw== + +esbuild-linux-arm@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-arm/-/esbuild-linux-arm-0.14.36.tgz#90e23bca2e6e549affbbe994f80ba3bb6c4d934a" + integrity sha512-NhgU4n+NCsYgt7Hy61PCquEz5aevI6VjQvxwBxtxrooXsxt5b2xtOUXYZe04JxqQo+XZk3d1gcr7pbV9MAQ/Lg== + +esbuild-linux-mips64le@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-mips64le/-/esbuild-linux-mips64le-0.14.36.tgz#40e11afb08353ff24709fc89e4db0f866bc131d2" + integrity sha512-hZUeTXvppJN+5rEz2EjsOFM9F1bZt7/d2FUM1lmQo//rXh1RTFYzhC0txn7WV0/jCC7SvrGRaRz0NMsRPf8SIA== + +esbuild-linux-ppc64le@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-ppc64le/-/esbuild-linux-ppc64le-0.14.36.tgz#9e8a588c513d06cc3859f9dcc52e5fdfce8a1a5e" + integrity sha512-1Bg3QgzZjO+QtPhP9VeIBhAduHEc2kzU43MzBnMwpLSZ890azr4/A9Dganun8nsqD/1TBcqhId0z4mFDO8FAvg== + +esbuild-linux-riscv64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-riscv64/-/esbuild-linux-riscv64-0.14.36.tgz#e578c09b23b3b97652e60e3692bfda628b541f06" + integrity sha512-dOE5pt3cOdqEhaufDRzNCHf5BSwxgygVak9UR7PH7KPVHwSTDAZHDoEjblxLqjJYpc5XaU9+gKJ9F8mp9r5I4A== + +esbuild-linux-s390x@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-linux-s390x/-/esbuild-linux-s390x-0.14.36.tgz#3c9dab40d0d69932ffded0fd7317bb403626c9bc" + integrity sha512-g4FMdh//BBGTfVHjF6MO7Cz8gqRoDPzXWxRvWkJoGroKA18G9m0wddvPbEqcQf5Tbt2vSc1CIgag7cXwTmoTXg== + +esbuild-netbsd-64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-netbsd-64/-/esbuild-netbsd-64-0.14.36.tgz#e27847f6d506218291619b8c1e121ecd97628494" + 
integrity sha512-UB2bVImxkWk4vjnP62ehFNZ73lQY1xcnL5ZNYF3x0AG+j8HgdkNF05v67YJdCIuUJpBuTyCK8LORCYo9onSW+A== + +esbuild-openbsd-64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-openbsd-64/-/esbuild-openbsd-64-0.14.36.tgz#c94c04c557fae516872a586eae67423da6d2fabb" + integrity sha512-NvGB2Chf8GxuleXRGk8e9zD3aSdRO5kLt9coTQbCg7WMGXeX471sBgh4kSg8pjx0yTXRt0MlrUDnjVYnetyivg== + +esbuild-sunos-64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-sunos-64/-/esbuild-sunos-64-0.14.36.tgz#9b79febc0df65a30f1c9bd63047d1675511bf99d" + integrity sha512-VkUZS5ftTSjhRjuRLp+v78auMO3PZBXu6xl4ajomGenEm2/rGuWlhFSjB7YbBNErOchj51Jb2OK8lKAo8qdmsQ== + +esbuild-windows-32@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-windows-32/-/esbuild-windows-32-0.14.36.tgz#910d11936c8d2122ffdd3275e5b28d8a4e1240ec" + integrity sha512-bIar+A6hdytJjZrDxfMBUSEHHLfx3ynoEZXx/39nxy86pX/w249WZm8Bm0dtOAByAf4Z6qV0LsnTIJHiIqbw0w== + +esbuild-windows-64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-windows-64/-/esbuild-windows-64-0.14.36.tgz#21b4ce8b42a4efc63f4b58ec617f1302448aad26" + integrity sha512-+p4MuRZekVChAeueT1Y9LGkxrT5x7YYJxYE8ZOTcEfeUUN43vktSn6hUNsvxzzATrSgq5QqRdllkVBxWZg7KqQ== + +esbuild-windows-arm64@0.14.36: + version "0.14.36" + resolved "https://registry.yarnpkg.com/esbuild-windows-arm64/-/esbuild-windows-arm64-0.14.36.tgz#ba21546fecb7297667d0052d00150de22c044b24" + integrity sha512-fBB4WlDqV1m18EF/aheGYQkQZHfPHiHJSBYzXIo8yKehek+0BtBwo/4PNwKGJ5T0YK0oc8pBKjgwPbzSrPLb+Q== + +esbuild@^0.14.27: + version "0.14.36" + resolved "https://registry.npmmirror.com/esbuild/-/esbuild-0.14.36.tgz" + integrity sha512-HhFHPiRXGYOCRlrhpiVDYKcFJRdO0sBElZ668M4lh2ER0YgnkLxECuFe7uWCf23FrcLc59Pqr7dHkTqmRPDHmw== + optionalDependencies: + esbuild-android-64 "0.14.36" + esbuild-android-arm64 "0.14.36" + esbuild-darwin-64 "0.14.36" + esbuild-darwin-arm64 "0.14.36" + esbuild-freebsd-64 "0.14.36" + esbuild-freebsd-arm64 "0.14.36" + esbuild-linux-32 "0.14.36" + esbuild-linux-64 "0.14.36" + esbuild-linux-arm "0.14.36" + esbuild-linux-arm64 "0.14.36" + esbuild-linux-mips64le "0.14.36" + esbuild-linux-ppc64le "0.14.36" + esbuild-linux-riscv64 "0.14.36" + esbuild-linux-s390x "0.14.36" + esbuild-netbsd-64 "0.14.36" + esbuild-openbsd-64 "0.14.36" + esbuild-sunos-64 "0.14.36" + esbuild-windows-32 "0.14.36" + esbuild-windows-64 "0.14.36" + esbuild-windows-arm64 "0.14.36" + +escape-html@^1.0.3: + version "1.0.3" + resolved "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz" + integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow== + +estree-walker@^2.0.2: + version "2.0.2" + resolved "https://registry.npmmirror.com/estree-walker/-/estree-walker-2.0.2.tgz" + integrity sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w== + +follow-redirects@^1.14.8: + version "1.14.9" + resolved "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.14.9.tgz" + integrity sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w== + +fsevents@~2.3.2: + version "2.3.2" + resolved "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.2.tgz" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.1.tgz" + integrity 
sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + +graceful-fs@^4.1.2: + version "4.2.10" + resolved "https://registry.npmmirror.com/graceful-fs/-/graceful-fs-4.2.10.tgz" + integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.npmmirror.com/has/-/has-1.0.3.tgz" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +iconv-lite@^0.4.4: + version "0.4.24" + resolved "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +image-size@~0.5.0: + version "0.5.5" + resolved "https://registry.npmmirror.com/image-size/-/image-size-0.5.5.tgz" + integrity sha512-6TDAlDPZxUFCv+fuOkIoXT/V/f3Qbq8e37p+YOiYrUv3v9cc3/6x78VdfPgFVaB9dZYeLUfKgHRebpkm/oP2VQ== + +is-core-module@^2.8.1: + version "2.8.1" + resolved "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.8.1.tgz" + integrity sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA== + dependencies: + has "^1.0.3" + +is-plain-object@3.0.1: + version "3.0.1" + resolved "https://registry.npmmirror.com/is-plain-object/-/is-plain-object-3.0.1.tgz" + integrity sha512-Xnpx182SBMrr/aBik8y+GuR4U1L9FqMSojwDQwPMmxyC6bvEqly9UBCxhauBF5vNh2gwWJNX6oDV7O+OM4z34g== + +is-what@^3.14.1: + version "3.14.1" + resolved "https://registry.npmmirror.com/is-what/-/is-what-3.14.1.tgz" + integrity sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA== + +js-audio-recorder@0.5.7: + version "0.5.7" + resolved "https://registry.npmmirror.com/js-audio-recorder/-/js-audio-recorder-0.5.7.tgz" + integrity sha512-DIlv30N86AYHr7zGHN0O7V/3Rd8Q6SIJ/MBzVJaT9STWTdhF4E/8fxCX6ZMgRSv8xmx6fEqcFFNPoofmxJD4+A== + +"js-tokens@^3.0.0 || ^4.0.0": + version "4.0.0" + resolved "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +lamejs@^1.2.1: + version "1.2.1" + resolved "https://registry.npmmirror.com/lamejs/-/lamejs-1.2.1.tgz" + integrity sha512-s7bxvjvYthw6oPLCm5pFxvA84wUROODB8jEO2+CE1adhKgrIvVOlmMgY8zyugxGrvRaDHNJanOiS21/emty6dQ== + dependencies: + use-strict "1.0.1" + +less@^4.1.2: + version "4.1.2" + resolved "https://registry.npmmirror.com/less/-/less-4.1.2.tgz" + integrity sha512-EoQp/Et7OSOVu0aJknJOtlXZsnr8XE8KwuzTHOLeVSEx8pVWUICc8Q0VYRHgzyjX78nMEyC/oztWFbgyhtNfDA== + dependencies: + copy-anything "^2.0.1" + parse-node-version "^1.0.1" + tslib "^2.3.0" + optionalDependencies: + errno "^0.1.1" + graceful-fs "^4.1.2" + image-size "~0.5.0" + make-dir "^2.1.0" + mime "^1.4.1" + needle "^2.5.2" + source-map "~0.6.0" + +lodash-es@^4.17.15, lodash-es@^4.17.21: + version "4.17.21" + resolved "https://registry.npmmirror.com/lodash-es/-/lodash-es-4.17.21.tgz" + integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== + +lodash-unified@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/lodash-unified/-/lodash-unified-1.0.2.tgz" + integrity sha512-OGbEy+1P+UT26CYi4opY4gebD8cWRDxAT6MAObIVQMiqYdxZr1g3QHWCToVsm31x2NkLS4K3+MC2qInaRMa39g== + +lodash@^4.17.21: + version "4.17.21" + resolved 
"https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +loose-envify@^1.0.0: + version "1.4.0" + resolved "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +magic-string@^0.25.7: + version "0.25.9" + resolved "https://registry.npmmirror.com/magic-string/-/magic-string-0.25.9.tgz" + integrity sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ== + dependencies: + sourcemap-codec "^1.4.8" + +make-dir@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/make-dir/-/make-dir-2.1.0.tgz" + integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA== + dependencies: + pify "^4.0.1" + semver "^5.6.0" + +memoize-one@^6.0.0: + version "6.0.0" + resolved "https://registry.npmmirror.com/memoize-one/-/memoize-one-6.0.0.tgz" + integrity sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw== + +mime@^1.4.1: + version "1.6.0" + resolved "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== + +moment@^2.27.0: + version "2.29.3" + resolved "https://registry.npmmirror.com/moment/-/moment-2.29.3.tgz" + integrity sha512-c6YRvhEo//6T2Jz/vVtYzqBzwvPT95JBQ+smCytzf7c50oMZRsR/a4w88aD34I+/QVSfnoAnSBFPJHItlOMJVw== + +ms@^2.1.1: + version "2.1.3" + resolved "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz" + integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== + +nanoid@^3.3.1: + version "3.3.2" + resolved "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.2.tgz" + integrity sha512-CuHBogktKwpm5g2sRgv83jEy2ijFzBwMoYA60orPDR7ynsLijJDqgsi4RDGj3OJpy3Ieb+LYwiRmIOGyytgITA== + +nanopop@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/nanopop/-/nanopop-2.1.0.tgz" + integrity sha512-jGTwpFRexSH+fxappnGQtN9dspgE2ipa1aOjtR24igG0pv6JCxImIAmrLRHX+zUF5+1wtsFVbKyfP51kIGAVNw== + +needle@^2.5.2: + version "2.9.1" + resolved "https://registry.npmmirror.com/needle/-/needle-2.9.1.tgz" + integrity sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ== + dependencies: + debug "^3.2.6" + iconv-lite "^0.4.4" + sax "^1.2.4" + +normalize-wheel-es@^1.1.2: + version "1.1.2" + resolved "https://registry.npmmirror.com/normalize-wheel-es/-/normalize-wheel-es-1.1.2.tgz" + integrity sha512-scX83plWJXYH1J4+BhAuIHadROzxX0UBF3+HuZNY2Ks8BciE7tSTQ+5JhTsvzjaO0/EJdm4JBGrfObKxFf3Png== + +omit.js@^2.0.0: + version "2.0.2" + resolved "https://registry.npmmirror.com/omit.js/-/omit.js-2.0.2.tgz" + integrity sha512-hJmu9D+bNB40YpL9jYebQl4lsTW6yEHRTroJzNLqQJYHm7c+NQnJGfZmIWh8S3q3KoaxV1aLhV6B3+0N0/kyJg== + +parse-node-version@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/parse-node-version/-/parse-node-version-1.0.1.tgz" + integrity sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA== + +path-parse@^1.0.7: + version "1.0.7" + resolved "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +picocolors@^1.0.0: + version "1.0.0" + 
resolved "https://registry.npmmirror.com/picocolors/-/picocolors-1.0.0.tgz" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== + +pify@^4.0.1: + version "4.0.1" + resolved "https://registry.npmmirror.com/pify/-/pify-4.0.1.tgz" + integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g== + +postcss@^8.1.10, postcss@^8.4.12: + version "8.4.12" + resolved "https://registry.npmmirror.com/postcss/-/postcss-8.4.12.tgz" + integrity sha512-lg6eITwYe9v6Hr5CncVbK70SoioNQIq81nsaG86ev5hAidQvmOeETBqs7jm43K2F5/Ley3ytDtriImV6TpNiSg== + dependencies: + nanoid "^3.3.1" + picocolors "^1.0.0" + source-map-js "^1.0.2" + +prr@~1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/prr/-/prr-1.0.1.tgz" + integrity sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw== + +regenerator-runtime@^0.13.4: + version "0.13.9" + resolved "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz" + integrity sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA== + +resize-observer-polyfill@^1.5.1: + version "1.5.1" + resolved "https://registry.npmmirror.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz" + integrity sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg== + +resolve@^1.22.0: + version "1.22.0" + resolved "https://registry.npmmirror.com/resolve/-/resolve-1.22.0.tgz" + integrity sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw== + dependencies: + is-core-module "^2.8.1" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +rollup@^2.59.0: + version "2.70.1" + resolved "https://registry.npmmirror.com/rollup/-/rollup-2.70.1.tgz" + integrity sha512-CRYsI5EuzLbXdxC6RnYhOuRdtz4bhejPMSWjsFLfVM/7w/85n2szZv6yExqUXsBdz5KT8eoubeyDUDjhLHEslA== + optionalDependencies: + fsevents "~2.3.2" + +"safer-buffer@>= 2.1.2 < 3": + version "2.1.2" + resolved "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sax@^1.2.4: + version "1.2.4" + resolved "https://registry.npmmirror.com/sax/-/sax-1.2.4.tgz" + integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== + +scroll-into-view-if-needed@^2.2.25: + version "2.2.29" + resolved "https://registry.npmmirror.com/scroll-into-view-if-needed/-/scroll-into-view-if-needed-2.2.29.tgz" + integrity sha512-hxpAR6AN+Gh53AdAimHM6C8oTN1ppwVZITihix+WqalywBeFcQ6LdQP5ABNl26nX8GTEL7VT+b8lKpdqq65wXg== + dependencies: + compute-scroll-into-view "^1.0.17" + +semver@^5.6.0: + version "5.7.1" + resolved "https://registry.npmmirror.com/semver/-/semver-5.7.1.tgz" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + +shallow-equal@^1.0.0: + version "1.2.1" + resolved "https://registry.npmmirror.com/shallow-equal/-/shallow-equal-1.2.1.tgz" + integrity sha512-S4vJDjHHMBaiZuT9NPb616CSmLf618jawtv3sufLl6ivK8WocjAo58cXwbRV1cgqxH0Qbv+iUt6m05eqEa2IRA== + +source-map-js@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.0.2.tgz" + integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== + +source-map@^0.6.1, source-map@~0.6.0: + version "0.6.1" + 
resolved "https://registry.npmmirror.com/source-map/-/source-map-0.6.1.tgz" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +sourcemap-codec@^1.4.8: + version "1.4.8" + resolved "https://registry.npmmirror.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz" + integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA== + +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + +tslib@^2.3.0: + version "2.4.0" + resolved "https://registry.npmmirror.com/tslib/-/tslib-2.4.0.tgz" + integrity sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ== + +use-strict@1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/use-strict/-/use-strict-1.0.1.tgz" + integrity sha512-IeiWvvEXfW5ltKVMkxq6FvNf2LojMKvB2OCeja6+ct24S1XOmQw2dGr2JyndwACWAGJva9B7yPHwAmeA9QCqAQ== + +vite@^2.9.0: + version "2.9.1" + resolved "https://registry.npmmirror.com/vite/-/vite-2.9.1.tgz" + integrity sha512-vSlsSdOYGcYEJfkQ/NeLXgnRv5zZfpAsdztkIrs7AZHV8RCMZQkwjo4DS5BnrYTqoWqLoUe1Cah4aVO4oNNqCQ== + dependencies: + esbuild "^0.14.27" + postcss "^8.4.12" + resolve "^1.22.0" + rollup "^2.59.0" + optionalDependencies: + fsevents "~2.3.2" + +vue-demi@*: + version "0.12.5" + resolved "https://registry.npmmirror.com/vue-demi/-/vue-demi-0.12.5.tgz" + integrity sha512-BREuTgTYlUr0zw0EZn3hnhC3I6gPWv+Kwh4MCih6QcAeaTlaIX0DwOVN0wHej7hSvDPecz4jygy/idsgKfW58Q== + +vue-types@^3.0.0: + version "3.0.2" + resolved "https://registry.npmmirror.com/vue-types/-/vue-types-3.0.2.tgz" + integrity sha512-IwUC0Aq2zwaXqy74h4WCvFCUtoV0iSWr0snWnE9TnU18S66GAQyqQbRf2qfJtUuiFsBf6qp0MEwdonlwznlcrw== + dependencies: + is-plain-object "3.0.1" + +vue@^3.2.25: + version "3.2.32" + resolved "https://registry.npmmirror.com/vue/-/vue-3.2.32.tgz" + integrity sha512-6L3jKZApF042OgbCkh+HcFeAkiYi3Lovi8wNhWqIK98Pi5efAMLZzRHgi91v+60oIRxdJsGS9sTMsb+yDpY8Eg== + dependencies: + "@vue/compiler-dom" "3.2.32" + "@vue/compiler-sfc" "3.2.32" + "@vue/runtime-dom" "3.2.32" + "@vue/server-renderer" "3.2.32" + "@vue/shared" "3.2.32" + +warning@^4.0.0: + version "4.0.3" + resolved "https://registry.npmmirror.com/warning/-/warning-4.0.3.tgz" + integrity sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w== + dependencies: + loose-envify "^1.0.0" diff --git "a/demos/speech_web/\346\216\245\345\217\243\346\226\207\346\241\243.md" "b/demos/speech_web/\346\216\245\345\217\243\346\226\207\346\241\243.md" new file mode 100644 index 0000000000000000000000000000000000000000..a811a3f4e55284640704941cd10b888f1b0373e6 --- /dev/null +++ "b/demos/speech_web/\346\216\245\345\217\243\346\226\207\346\241\243.md" @@ -0,0 +1,406 @@ +# 接口文档 + +开启服务后可参照: + +http://0.0.0.0:8010/docs + +## ASR + +### 【POST】/asr/offline + +说明:上传16k,16bit wav文件,返回 offline 语音识别模型识别结果 + +返回: JSON + +前端接口: ASR-端到端识别,音频文件识别;语音指令-录音上传 + +示例: + +```json +{ + "code": 0, + "result": "你也喜欢这个天气吗", + "message": "ok" +} +``` + +### 【POST】/asr/offlinefile + +说明:上传16k,16bit wav文件,返回 offline 语音识别模型识别结果 + wav数据的base64 + +返回: JSON + +前端接口: 音频文件识别(播放这段base64还原后记得添加wav头,采样率16k, int16,添加后才能播放) + +示例: + +```json +{ + "code": 0, + "result": { + "asr_result": "今天天气真好", + "wav_base64": 
"///+//3//f/8/////v/////////////////+/wAA//8AAAEAAQACAAIAAQABAP" + }, + "message": "ok" +} +``` + + +### 【POST】/asr/collectEnv + +说明: 通过采集环境噪音,上传16k, int16 wav文件,来生成后台VAD的能量阈值, 返回阈值结果 + +前端接口:ASR-环境采样 + +返回: JSON + +```json +{ + "code": 0, + "result": 3624.93505859375, + "message": "采集环境噪音成功" +} +``` + +### 【GET】/asr/stopRecord + +说明:通过 GET 请求 /asr/stopRecord, 后台停止接收 offlineStream 中通过 WS协议 上传的数据 + +前端接口:语音聊天-暂停录音(获取NLP,播放TTS时暂停) + +返回: JSON + +```JSON +{ + "code": 0, + "result": null, + "message": "停止成功" +} +``` + +### 【GET】/asr/resumeRecord + +说明:通过 GET 请求 /asr/resumeRecord, 后台停止接收 offlineStream 中通过 WS协议 上传的数据 + +前端接口:语音聊天-恢复录音(TTS播放完毕时,告诉后台恢复录音) + +返回: JSON + +```JSON +{ + "code": 0, + "result": null, + "message": "Online录音恢复" +} +``` + +### 【Websocket】/ws/asr/offlineStream + +说明:通过 WS 协议,将前端音频持续上传到后台,前端采集 16k,Int16 类型的PCM片段,持续上传到后端 + +前端接口:语音聊天-开始录音,持续将麦克风语音传给后端,后端推送语音识别结果 + +返回:后端返回识别结果,offline模型识别结果, 由WS推送 + + +### 【Websocket】/ws/asr/onlineStream + +说明:通过 WS 协议,将前端音频持续上传到后台,前端采集 16k,Int16 类型的PCM片段,持续上传到后端 + +前端接口:ASR-流式识别开始录音,持续将麦克风语音传给后端,后端推送语音识别结果 + +返回:后端返回识别结果,online模型识别结果, 由WS推送 + +## NLP + +### 【POST】/nlp/chat + +说明:返回闲聊对话的结果 + +前端接口:语音聊天-获取到ASR识别结果后,向后端获取闲聊文本 + +上传示例: + +```json +{ + "chat": "天气非常棒" +} +``` + +返回示例: + +```json +{ + "code": 0, + "result": "是的,我也挺喜欢的", + "message": "ok" +} +``` + + +### 【POST】/nlp/ie + +说明:返回信息抽取结果 + +前端接口:语音指令-向后端获取信息抽取结果 + +上传示例: + +```json +{ + "chat": "今天我从马来西亚出发去香港花了五十万元" +} +``` + +返回示例: + +```json +{ + "code": 0, + "result": [ + { + "时间": [ + { + "text": "今天", + "start": 0, + "end": 2, + "probability": 0.9817976247505698 + } + ], + "出发地": [ + { + "text": "马来西亚", + "start": 4, + "end": 8, + "probability": 0.974892389414169 + } + ], + "目的地": [ + { + "text": "马来西亚", + "start": 4, + "end": 8, + "probability": 0.7347504438136951 + } + ], + "费用": [ + { + "text": "五十万元", + "start": 15, + "end": 19, + "probability": 0.9679076530644402 + } + ] + } + ], + "message": "ok" +} +``` + + +## TTS + +### 【POST】/tts/offline + +说明:获取TTS离线模型音频 + +前端接口:TTS-端到端合成 + +上传示例: + +```json +{ + "text": "天气非常棒" +} +``` + +返回示例:对应音频对应的 base64 编码 + +```json +{ + "code": 0, + "result": "UklGRrzQAABXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAAZGF0YZjQAAADAP7/BAADAAAA...", + "message": "ok" +} +``` + +### 【POST】/tts/online + +说明:流式获取语音合成音频 + +前端接口:流式合成 + +上传示例: +```json +{ + "text": "天气非常棒" +} + +``` + +返回示例: + +二进制PCM片段,16k Int 16类型 + +## VPR + +### 【POST】/vpr/enroll + +说明:声纹注册,通过表单上传 spk_id(字符串,非空), 与 audio (文件) + +前端接口:声纹识别-声纹注册 + +上传示例: + +```text +curl -X 'POST' \ + 'http://0.0.0.0:8010/vpr/enroll' \ + -H 'accept: application/json' \ + -H 'Content-Type: multipart/form-data' \ + -F 'spk_id=啦啦啦啦' \ + -F 'audio=@demo_16k.wav;type=audio/wav' +``` + +返回示例: + +```json +{ + "status": true, + "msg": "Successfully enroll data!" +} +``` + +### 【POST】/vpr/recog + +说明:声纹识别,识别文件,提取文件的声纹信息做比对 音频 16k, int 16 wav格式 + +前端接口:声纹识别-上传音频,返回声纹识别结果 + +上传示例: + +```shell +curl -X 'POST' \ + 'http://0.0.0.0:8010/vpr/recog' \ + -H 'accept: application/json' \ + -H 'Content-Type: multipart/form-data' \ + -F 'audio=@demo_16k.wav;type=audio/wav' +``` + +返回示例: + +```json +[ + [ + "啦啦啦啦", + [ + "", + 100 + ] + ], + [ + "test1", + [ + "", + 11.64 + ] + ], + [ + "test2", + [ + "", + 6.09 + ] + ] +] + +``` + + +### 【POST】/vpr/del + +说明: 根据 spk_id 删除用户数据 + +前端接口:声纹识别-删除用户数据 + +上传示例: +```json +{ + "spk_id":"啦啦啦啦" +} +``` + +返回示例 + +```json +{ + "status": true, + "msg": "Successfully delete data!" 
+## NLP
+
+### 【POST】/nlp/chat
+
+说明:返回闲聊对话的结果
+
+前端接口:语音聊天-获取到ASR识别结果后,向后端获取闲聊文本
+
+上传示例:
+
+```json
+{
+  "chat": "天气非常棒"
+}
+```
+
+返回示例:
+
+```json
+{
+  "code": 0,
+  "result": "是的,我也挺喜欢的",
+  "message": "ok"
+}
+```
+
+
+### 【POST】/nlp/ie
+
+说明:返回信息抽取结果
+
+前端接口:语音指令-向后端获取信息抽取结果
+
+上传示例:
+
+```json
+{
+  "chat": "今天我从马来西亚出发去香港花了五十万元"
+}
+```
+
+返回示例:
+
+```json
+{
+  "code": 0,
+  "result": [
+    {
+      "时间": [
+        {
+          "text": "今天",
+          "start": 0,
+          "end": 2,
+          "probability": 0.9817976247505698
+        }
+      ],
+      "出发地": [
+        {
+          "text": "马来西亚",
+          "start": 4,
+          "end": 8,
+          "probability": 0.974892389414169
+        }
+      ],
+      "目的地": [
+        {
+          "text": "马来西亚",
+          "start": 4,
+          "end": 8,
+          "probability": 0.7347504438136951
+        }
+      ],
+      "费用": [
+        {
+          "text": "五十万元",
+          "start": 15,
+          "end": 19,
+          "probability": 0.9679076530644402
+        }
+      ]
+    }
+  ],
+  "message": "ok"
+}
+```
+
+
+## TTS
+
+### 【POST】/tts/offline
+
+说明:获取 TTS 离线模型合成的音频
+
+前端接口:TTS-端到端合成
+
+上传示例:
+
+```json
+{
+  "text": "天气非常棒"
+}
+```
+
+返回示例:合成音频的 base64 编码
+
+```json
+{
+  "code": 0,
+  "result": "UklGRrzQAABXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAAZGF0YZjQAAADAP7/BAADAAAA...",
+  "message": "ok"
+}
+```
+
+### 【POST】/tts/online
+
+说明:流式获取语音合成音频
+
+前端接口:流式合成
+
+上传示例:
+
+```json
+{
+  "text": "天气非常棒"
+}
+```
+
+返回示例:
+
+二进制 PCM 片段,16k, Int16 类型
+
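+下面是一个流式消费 /tts/online 返回结果的 Python 示意(假设服务按 HTTP 分块持续返回 16k, int16 的裸 PCM;这里先保存为 wav,实际应用中也可以边接收边播放):
+
+```python
+# 示意:流式获取 /tts/online 的 PCM 分片并保存为 wav
+import wave
+
+import requests
+
+resp = requests.post(
+    "http://localhost:8010/tts/online", json={"text": "天气非常棒"}, stream=True)
+with wave.open("tts_online.wav", 'wb') as f:
+    f.setnchannels(1)      # 单通道
+    f.setsampwidth(2)      # int16
+    f.setframerate(16000)  # 16k 采样率
+    for chunk in resp.iter_content(chunk_size=4096):
+        f.writeframes(chunk)  # wave 模块负责补全 wav 头
+```
+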
+## VPR
+
+### 【POST】/vpr/enroll
+
+说明:声纹注册,通过表单上传 spk_id(字符串,非空)与 audio(音频文件)
+
+前端接口:声纹识别-声纹注册
+
+上传示例:
+
+```shell
+curl -X 'POST' \
+  'http://0.0.0.0:8010/vpr/enroll' \
+  -H 'accept: application/json' \
+  -H 'Content-Type: multipart/form-data' \
+  -F 'spk_id=啦啦啦啦' \
+  -F 'audio=@demo_16k.wav;type=audio/wav'
+```
+
+返回示例:
+
+```json
+{
+  "status": true,
+  "msg": "Successfully enroll data!"
+}
+```
+
+### 【POST】/vpr/recog
+
+说明:声纹识别,上传 16k, int16 wav 格式的音频文件,提取其中的声纹信息并与已注册声纹逐一比对
+
+前端接口:声纹识别-上传音频,返回声纹识别结果
+
+上传示例:
+
+```shell
+curl -X 'POST' \
+  'http://0.0.0.0:8010/vpr/recog' \
+  -H 'accept: application/json' \
+  -H 'Content-Type: multipart/form-data' \
+  -F 'audio=@demo_16k.wav;type=audio/wav'
+```
+
+返回示例:
+
+```json
+[
+  [
+    "啦啦啦啦",
+    [
+      "",
+      100
+    ]
+  ],
+  [
+    "test1",
+    [
+      "",
+      11.64
+    ]
+  ],
+  [
+    "test2",
+    [
+      "",
+      6.09
+    ]
+  ]
+]
+```
+
+
+### 【POST】/vpr/del
+
+说明:根据 spk_id 删除用户数据
+
+前端接口:声纹识别-删除用户数据
+
+上传示例:
+
+```json
+{
+  "spk_id": "啦啦啦啦"
+}
+```
+
+返回示例:
+
+```json
+{
+  "status": true,
+  "msg": "Successfully delete data!"
+}
+```
+
+
+### 【GET】/vpr/list
+
+说明:查询用户列表数据,无需参数,返回 spk_id 与 vpr_id
+
+前端接口:声纹识别-获取声纹数据列表
+
+返回示例:
+
+```json
+[
+  [
+    "test1",
+    "test2"
+  ],
+  [
+    9,
+    10
+  ]
+]
+```
+
+
+### 【GET】/vpr/data
+
+说明:根据 vpr_id 获取用户注册声纹时使用的音频
+
+前端接口:声纹识别-获取vpr对应的音频
+
+访问示例:
+
+```shell
+curl -X 'GET' \
+  'http://0.0.0.0:8010/vpr/data?vprId=9' \
+  -H 'accept: application/json'
+```
+
+返回示例:
+
+对应的音频文件
+
+### 【GET】/vpr/database64
+
+说明:根据 vpr_id 获取用户注册声纹时使用的音频,转换成 16k, int16 类型的数组后返回其 base64 编码
+
+前端接口:声纹识别-获取vpr对应的音频(注意:播放时需要添加 wav 头,16k, int16,可参考 tts 播放时添加 wav 头的方式,注意更改采样率)
+
+访问示例:
+
+```shell
+curl -X 'GET' \
+  'http://localhost:8010/vpr/database64?vprId=12' \
+  -H 'accept: application/json'
+```
+
+返回示例:
+
+```json
+{
+  "code": 0,
+  "result": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
+  "message": "ok"
+}
+```
+
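+/asr/offlinefile 与 /vpr/database64 返回的都是裸 PCM 的 base64 编码,播放前需要补上 wav 头。下面是一个最小的 Python 还原示意(假设解码后为 16k 采样率、16bit、单通道的 PCM 数据):
+
+```python
+# 示意:将接口返回的 base64 字符串还原为可直接播放的 wav 文件
+import base64
+import wave
+
+
+def base64_to_wav(b64_str, out_path, sample_rate=16000):
+    pcm = base64.b64decode(b64_str)
+    with wave.open(out_path, 'wb') as f:
+        f.setnchannels(1)   # 单通道
+        f.setsampwidth(2)   # int16 对应 2 字节
+        f.setframerate(sample_rate)
+        f.writeframes(pcm)  # wave 模块会自动写入 wav 头
+```
+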
diff --git a/demos/streaming_asr_server/.gitignore b/demos/streaming_asr_server/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0f09019de5ceb17595729c8e601f724b2c2a6d19
--- /dev/null
+++ b/demos/streaming_asr_server/.gitignore
@@ -0,0 +1,2 @@
+exp
+
diff --git a/demos/streaming_asr_server/conf/ws_ds2_application.yaml b/demos/streaming_asr_server/conf/ws_ds2_application.yaml
index d19bd26dc1b4d1a45f9fb797f9f4e749099948ad..e36a829cc48265b374d639eeefa3ae6c559f6d6f 100644
--- a/demos/streaming_asr_server/conf/ws_ds2_application.yaml
+++ b/demos/streaming_asr_server/conf/ws_ds2_application.yaml
@@ -7,11 +7,11 @@ host: 0.0.0.0
 port: 8090
 
 # The task format in the engin_list is: <speech task>_<engine type>
-# task choices = ['asr_online']
+# task choices = ['asr_online-inference', 'asr_online-onnx']
 # protocol = ['websocket'] (only one can be selected).
 # websocket only support online engine type.
 protocol: 'websocket'
-engine_list: ['asr_online']
+engine_list: ['asr_online-onnx']
 
 
 #################################################################################
@@ -19,11 +19,11 @@ engine_list: ['asr_online']
 #################################################################################
 
 ################################### ASR #########################################
-################### speech task: asr; engine_type: online #######################
-asr_online:
-    model_type: 'deepspeech2online_aishell'
-    am_model: # the pdmodel file of am static model [optional]
-    am_params: # the pdiparams file of am static model [optional]
+################### speech task: asr; engine_type: online-inference #######################
+asr_online-inference:
+    model_type: 'deepspeech2online_wenetspeech'
+    am_model: # the pdmodel file of am static model [optional]
+    am_params: # the pdiparams file of am static model [optional]
     lang: 'zh'
     sample_rate: 16000
     cfg_path:
@@ -38,6 +38,41 @@ asr_online:
         glog_info: False # True -> print glog
         summary: True # False -> do not show predictor config
 
+    chunk_buffer_conf:
+        frame_duration_ms: 85
+        shift_ms: 40
+        sample_rate: 16000
+        sample_width: 2
+        window_n: 7 # frame
+        shift_n: 4 # frame
+        window_ms: 25 # ms
+        shift_ms: 10 # ms
+
+
+
+################################### ASR #########################################
+################### speech task: asr; engine_type: online-onnx #######################
+asr_online-onnx:
+    model_type: 'deepspeech2online_wenetspeech'
+    am_model: # the pdmodel file of onnx am static model [optional]
+    am_params: # the pdiparams file of am static model [optional]
+    lang: 'zh'
+    sample_rate: 16000
+    cfg_path:
+    decode_method:
+    num_decoding_left_chunks:
+    force_yes: True
+    device: 'cpu' # cpu or gpu:id
+
+    # https://onnxruntime.ai/docs/api/python/api_summary.html#inferencesession
+    am_predictor_conf:
+        device: 'cpu' # set 'gpu:id' or 'cpu'
+        graph_optimization_level: 0
+        intra_op_num_threads: 0 # Sets the number of threads used to parallelize the execution within nodes.
+        inter_op_num_threads: 0 # Sets the number of threads used to parallelize the execution of the graph (across nodes).
+        log_severity_level: 2 # Log severity level. Applies to session load, initialization, etc. 0:Verbose, 1:Info, 2:Warning, 3:Error, 4:Fatal. Default is 2.
+        log_verbosity_level: 0 # VLOG level if DEBUG build and session_log_severity_level is 0. Applies to session load, initialization, etc. Default is 0.
+
     chunk_buffer_conf:
         frame_duration_ms: 80
         shift_ms: 40
@@ -45,5 +80,5 @@ asr_online:
         sample_width: 2
         window_n: 7 # frame
         shift_n: 4 # frame
-        window_ms: 20 # ms
+        window_ms: 25 # ms
         shift_ms: 10 # ms
diff --git a/demos/streaming_asr_server/local/rtf_from_log.py b/demos/streaming_asr_server/local/rtf_from_log.py
new file mode 100755
index 0000000000000000000000000000000000000000..a5634388bbdfbe88475f0ec4520b47ad25a9457c
--- /dev/null
+++ b/demos/streaming_asr_server/local/rtf_from_log.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+"""Compute the overall RTF from websocket client logs."""
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        '--logfile', type=str, required=True, help='ws client log file')
+
+    args = parser.parse_args()
+
+    rtfs = []
+    with open(args.logfile, 'r') as f:
+        for line in f:
+            if 'RTF=' in line:
+                # e.g. audio duration: 6.126, elapsed time: 3.471978187561035, RTF=0.5667610492264177
+                line = line.strip()
+                beg = line.index("audio")
+                line = line[beg:]
+
+                items = line.split(',')
+                vals = []
+                for elem in items:
+                    if "RTF=" in elem:
+                        continue
+                    _, val = elem.split(":")
+                    vals.append(float(val))
+                # T: audio duration, P: processing (elapsed) time
+                keys = ['T', 'P']
+                meta = dict(zip(keys, vals))
+
+                rtfs.append(meta)
+
+    T = 0.0
+    P = 0.0
+    n = 0
+    for m in rtfs:
+        n += 1
+        T += m['T']
+        P += m['P']
+
+    print(f"RTF: {P/T}, utts: {n}")
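+
+# Usage sketch (assuming the log was produced by local/websocket_client.py, as in local/test.sh):
+#   python3 local/rtf_from_log.py --logfile exp/log.rsl
+# Expected output looks like: RTF: 0.56..., utts: N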
diff --git a/demos/streaming_asr_server/local/test.sh b/demos/streaming_asr_server/local/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d70dd336fca8d76b67e0b2eccddf264038512542
--- /dev/null
+++ b/demos/streaming_asr_server/local/test.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+if [ $# != 1 ];then
+    echo "usage: $0 wav_scp"
+    exit -1
+fi
+
+scp=$1
+
+# calc RTF
+# the wav_scp can be generated from `speechx/examples/ds2_ol/aishell`
+
+exp=exp
+mkdir -p $exp
+
+python3 local/websocket_client.py --server_ip 127.0.0.1 --port 8090 --wavscp $scp &> $exp/log.rsl
+
+python3 local/rtf_from_log.py --logfile $exp/log.rsl
+
+
+
\ No newline at end of file
diff --git a/demos/streaming_asr_server/websocket_client.py b/demos/streaming_asr_server/local/websocket_client.py
similarity index 89%
rename from demos/streaming_asr_server/websocket_client.py
rename to demos/streaming_asr_server/local/websocket_client.py
index 8e1f19a58820b8ce62f466d177719bdb6c458f55..51ae7a2f45591b60c28bd77d611401995682b909 100644
--- a/demos/streaming_asr_server/websocket_client.py
+++ b/demos/streaming_asr_server/local/websocket_client.py
@@ -1,3 +1,4 @@
+#!/usr/bin/python
 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -11,9 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#!/usr/bin/python
-# -*- coding: UTF-8 -*-
-# script for calc RTF: grep -rn RTF log.txt | awk '{print $NF}' | awk -F "=" '{sum += $NF} END {print "all time",sum, "audio num", NR, "RTF", sum/NR}'
+# calc avg RTF(NOT Accurate): grep -rn RTF log.txt | awk '{print $NF}' | awk -F "=" '{sum += $NF} END {print "all time",sum, "audio num", NR, "RTF", sum/NR}'
+# python3 websocket_client.py --server_ip 127.0.0.1 --port 8290 --punc.server_ip 127.0.0.1 --punc.port 8190 --wavfile ./zh.wav
+# python3 websocket_client.py --server_ip 127.0.0.1 --port 8290 --wavfile ./zh.wav
 import argparse
 import asyncio
 import codecs
diff --git a/demos/streaming_asr_server/test.sh b/demos/streaming_asr_server/test.sh
index f3075454d6ccda411bf024d354b693b3625aa1fe..67a5ec4c5023fc49d4585b608cabb87d88255811 100755
--- a/demos/streaming_asr_server/test.sh
+++ b/demos/streaming_asr_server/test.sh
@@ -3,11 +3,9 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav
 
 # read the wav and pass it to only streaming asr service
 # If `127.0.0.1` is not accessible, you need to use the actual service IP address.
-# python3 websocket_client.py --server_ip 127.0.0.1 --port 8290 --wavfile ./zh.wav
-paddlespeech_client asr_online --server_ip 127.0.0.1 --port 8290 --input ./zh.wav
+paddlespeech_client asr_online --server_ip 127.0.0.1 --port 8090 --input ./zh.wav
 
 # read the wav and call streaming and punc service
 # If `127.0.0.1` is not accessible, you need to use the actual service IP address.
-# python3 websocket_client.py --server_ip 127.0.0.1 --port 8290 --punc.server_ip 127.0.0.1 --punc.port 8190 --wavfile ./zh.wav
 paddlespeech_client asr_online --server_ip 127.0.0.1 --port 8290 --punc.server_ip 127.0.0.1 --punc.port 8190 --input ./zh.wav
diff --git a/docs/source/install.md b/docs/source/install.md
index e3ea74b2741e2c486facd4566e03e579adffa27e..ac48d88ba8258e3ccb2c9392f7d93669914be8e3 100644
--- a/docs/source/install.md
+++ b/docs/source/install.md
@@ -139,28 +139,13 @@ pip install . -i https://pypi.tuna.tsinghua.edu.cn/simple
 To avoid the trouble of environment setup, running in a Docker container is highly recommended. Otherwise, if you work on `Ubuntu` with `root` privilege, you can still complete the installation.
 
 ### Choice 1: Running in Docker Container (Recommend)
-Docker is an open-source tool to build, ship, and run distributed applications in an isolated environment. A Docker image for this project has been provided in [hub.docker.com](https://hub.docker.com) with dependencies of cuda and cudnn installed. This Docker image requires the support of NVIDIA GPU, so please make sure its availability and the [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) has been installed.
+Docker is an open-source tool to build, ship, and run distributed applications in an isolated environment. If you do not have a Docker environment, please refer to [Docker](https://www.docker.com/). If you plan to use the GPU version, you also need to install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker).
 
-Take several steps to launch the Docker image:
-- Download the Docker image
+We provide docker images containing the latest PaddleSpeech code, with all environment and package dependencies pre-installed. All you have to do is **pull and run the docker image**. Then you can enjoy PaddleSpeech without any extra steps.
 
-For example, pull paddle 2.2.0 image:
-```bash
-sudo nvidia-docker pull registry.baidubce.com/paddlepaddle/paddle:2.2.0-gpu-cuda10.2-cudnn7
-```
-- Clone this repository
-```bash
-git clone https://github.com/PaddlePaddle/PaddleSpeech.git
-```
-- Run the Docker image
-```bash
-sudo nvidia-docker run --net=host --ipc=host --rm -it -v $(pwd)/PaddleSpeech:/PaddleSpeech registry.baidubce.com/paddlepaddle/paddle:2.2.0-gpu-cuda10.2-cudnn7 /bin/bash
-```
-- Enter PaddleSpeech directory.
-```bash
-cd /PaddleSpeech
-```
-Now you can execute training, inference, and hyper-parameters tuning in Docker container.
+Get these images and their usage guidance from [docker hub](https://hub.docker.com/repository/docker/paddlecloud/paddlespeech), including CPU, GPU, and ROCm environment versions.
+
+If you have customized requirements for automatically building docker images, see the github repo [PaddlePaddle/PaddleCloud](https://github.com/PaddlePaddle/PaddleCloud/tree/main/tekton).
 
 ### Choice 2: Running in Ubuntu with Root Privilege
 - Install `build-essential` by apt
diff --git a/docs/source/install_cn.md b/docs/source/install_cn.md
index 5a967f404b5c04dd90f3cc873a8a81a1874902b7..345e79bb5dd8fd4d089c873cd42f7de8444088ba 100644
--- a/docs/source/install_cn.md
+++ b/docs/source/install_cn.md
@@ -130,26 +130,14 @@ pip install . -i https://pypi.tuna.tsinghua.edu.cn/simple
 - 选择 2: 使用`Ubuntu` ,并且拥有 root 权限。
 
 为了避免各种环境配置问题,我们非常推荐你使用 docker 容器。如果你不想使用 docker,但是可以使用拥有 root 权限的 Ubuntu 系统,你也可以完成**困难**方式的安装。
-### 选择1: 使用Docker容器(推荐)
-Docker 是一种开源工具,用于在和系统本身环境相隔离的环境中构建、发布和运行各类应用程序。你可以访问 [hub.docker.com](https://hub.docker.com) 来下载各种版本的 docker,目前已经有适用于 `PaddleSpeech` 的 docker 提供在了该网站上。Docker 镜像需要使用 Nvidia GPU,所以你也需要提前安装好 [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) 。
-你需要完成几个步骤来启动docker:
-- 下载 docker 镜像:
-  例如,拉取 paddle2.2.0 镜像:
-```bash
-sudo nvidia-docker pull registry.baidubce.com/paddlepaddle/paddle:2.2.0-gpu-cuda10.2-cudnn7
-```
-- 克隆 `PaddleSpeech` 仓库
-```bash
-git clone https://github.com/PaddlePaddle/PaddleSpeech.git
-```
-- 启动 docker 镜像
-```bash
-sudo nvidia-docker run --net=host --ipc=host --rm -it -v $(pwd)/PaddleSpeech:/PaddleSpeech registry.baidubce.com/paddlepaddle/paddle:2.2.0-gpu-cuda10.2-cudnn7 /bin/bash
-```
-- 进入 PaddleSpeech 目录
-```bash
-cd /PaddleSpeech
-```
+### 选择1: 使用 Docker 容器(推荐)
+Docker 是一种开源工具,用于在和系统本身环境相隔离的环境中构建、发布和运行各类应用程序。如果您没有 Docker 运行环境,请参考 [Docker 官网](https://www.docker.com/)进行安装,如果您准备使用 GPU 版本镜像,还需要提前安装好 [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) 。
+
+我们提供了包含最新 PaddleSpeech 代码的 docker 镜像,并预先安装好了所有的环境和库依赖,您只需要**拉取并运行 docker 镜像**,无需其他任何额外操作,即可开始享用 PaddleSpeech 的所有功能。
+
+在 [Docker Hub](https://hub.docker.com/repository/docker/paddlecloud/paddlespeech) 中获取这些镜像及相应的使用指南,包括 CPU、GPU、ROCm 版本。
+
+如果您对自动化制作 docker 镜像感兴趣,或有自定义需求,请访问 [PaddlePaddle/PaddleCloud](https://github.com/PaddlePaddle/PaddleCloud/tree/main/tekton) 做进一步了解。
 完成这些以后,你就可以在 docker 容器中执行训练、推理和超参 fine-tune。
 ### 选择2: 使用有 root 权限的 Ubuntu
 - 使用apt安装 `build-essential`
diff --git a/examples/csmsc/vits/conf/default.yaml b/examples/csmsc/vits/conf/default.yaml
index 47af780dc656533c147380b4b8b92ccf3a616076..32f995cc9489359bc91bb951442c5fde78286724 100644
--- a/examples/csmsc/vits/conf/default.yaml
+++ b/examples/csmsc/vits/conf/default.yaml
@@ -178,6 +178,8 @@ generator_first: False # whether to start updating generator first
 ##########################################################
 #                 OTHER TRAINING SETTING                 #
 ##########################################################
-max_epoch: 1000 # number of epochs
-num_snapshots: 10 #
max number of snapshots to keep while training -seed: 777 # random seed number +num_snapshots: 10 # max number of snapshots to keep while training +train_max_steps: 250000 # Number of training steps. == total_iters / ngpus, total_iters = 1000000 +save_interval_steps: 1000 # Interval steps to save checkpoint. +eval_interval_steps: 250 # Interval steps to evaluate the network. +seed: 777 # random seed number diff --git a/examples/csmsc/vits/local/preprocess.sh b/examples/csmsc/vits/local/preprocess.sh index 1d3ae59376499add5ef8479499254beada6df642..1cd6d1f9b0c0fa3f47b088195bf76c0d6d08f48b 100755 --- a/examples/csmsc/vits/local/preprocess.sh +++ b/examples/csmsc/vits/local/preprocess.sh @@ -4,6 +4,7 @@ stage=0 stop_stage=100 config_path=$1 +add_blank=$2 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then # get durations from MFA's result @@ -44,6 +45,7 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then --feats-stats=dump/train/feats_stats.npy \ --phones-dict=dump/phone_id_map.txt \ --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ --skip-wav-copy python3 ${BIN_DIR}/normalize.py \ @@ -52,6 +54,7 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then --feats-stats=dump/train/feats_stats.npy \ --phones-dict=dump/phone_id_map.txt \ --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ --skip-wav-copy python3 ${BIN_DIR}/normalize.py \ @@ -60,5 +63,6 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then --feats-stats=dump/train/feats_stats.npy \ --phones-dict=dump/phone_id_map.txt \ --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ --skip-wav-copy fi diff --git a/examples/csmsc/vits/local/synthesize_e2e.sh b/examples/csmsc/vits/local/synthesize_e2e.sh index edbb07bfc803ccd558477977143bdbe53280fc62..3f3bf6517a415020961eeef70ace4921d8062ee9 100755 --- a/examples/csmsc/vits/local/synthesize_e2e.sh +++ b/examples/csmsc/vits/local/synthesize_e2e.sh @@ -3,9 +3,12 @@ config_path=$1 train_output_path=$2 ckpt_name=$3 +add_blank=$4 + stage=0 stop_stage=0 + if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ @@ -14,5 +17,6 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --ckpt=${train_output_path}/checkpoints/${ckpt_name} \ --phones_dict=dump/phone_id_map.txt \ --output_dir=${train_output_path}/test_e2e \ - --text=${BIN_DIR}/../sentences.txt + --text=${BIN_DIR}/../sentences.txt \ + --add-blank=${add_blank} fi diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh index 80e56e7c146ae02b6b7b9ff23159cb746a8088b6..c284b7b238cfc528277909f297d7cbb10a273299 100755 --- a/examples/csmsc/vits/run.sh +++ b/examples/csmsc/vits/run.sh @@ -10,6 +10,7 @@ stop_stage=100 conf_path=conf/default.yaml train_output_path=exp/default ckpt_name=snapshot_iter_153.pdz +add_blank=true # with the following command, you can choose the stage range you want to run # such as `./run.sh --stage 0 --stop-stage 0` @@ -18,7 +19,7 @@ source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then # prepare data - ./local/preprocess.sh ${conf_path} || exit -1 + ./local/preprocess.sh ${conf_path} ${add_blank}|| exit -1 fi if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then @@ -32,5 +33,5 @@ fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then # synthesize_e2e, vocoder is pwgan - CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 + 
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} ${add_blank}|| exit -1 fi diff --git a/examples/ljspeech/voc0/local/synthesize.sh b/examples/ljspeech/voc0/local/synthesize.sh index 1d5e11836aa3647cc9d93d86c25403cfb37d5a39..11874e4991ba7cd45a6ac356086a31707b4109f3 100755 --- a/examples/ljspeech/voc0/local/synthesize.sh +++ b/examples/ljspeech/voc0/local/synthesize.sh @@ -8,5 +8,4 @@ python ${BIN_DIR}/synthesize.py \ --input=${input_mel_path} \ --output=${train_output_path}/wavs/ \ --checkpoint_path=${train_output_path}/checkpoints/${ckpt_name} \ - --ngpu=1 \ - --verbose \ No newline at end of file + --ngpu=1 \ No newline at end of file diff --git a/paddlespeech/audio/utils/__init__.py b/paddlespeech/audio/utils/__init__.py index 742f9f8efdf50b5992712fa7f2d48b0a16902dd5..f1e5deb0ac516cce6a1854140c5195060142ad66 100644 --- a/paddlespeech/audio/utils/__init__.py +++ b/paddlespeech/audio/utils/__init__.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from ...cli.utils import DATA_HOME -from ...cli.utils import MODEL_HOME +from ...utils.env import DATA_HOME +from ...utils.env import MODEL_HOME from .download import decompress from .download import download_and_decompress from .download import load_state_dict_from_url diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py index 00cad150ed8c9431a65cdbb6586a56bef56a9d5b..24839a8988ca8adcde8ad015f8abdd0ff4a3b9d9 100644 --- a/paddlespeech/cli/asr/infer.py +++ b/paddlespeech/cli/asr/infer.py @@ -26,11 +26,11 @@ import paddle import soundfile from yacs.config import CfgNode +from ...utils.env import MODEL_HOME from ..download import get_path_from_url from ..executor import BaseExecutor from ..log import logger from ..utils import CLI_TIMER -from ..utils import MODEL_HOME from ..utils import stats_wrapper from ..utils import timer_register from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer @@ -187,7 +187,7 @@ class ASRExecutor(BaseExecutor): elif "conformer" in model_type or "transformer" in model_type: self.config.decode.decoding_method = decode_method if num_decoding_left_chunks: - assert num_decoding_left_chunks == -1 or num_decoding_left_chunks >= 0, f"num_decoding_left_chunks should be -1 or >=0" + assert num_decoding_left_chunks == -1 or num_decoding_left_chunks >= 0, "num_decoding_left_chunks should be -1 or >=0" self.config.num_decoding_left_chunks = num_decoding_left_chunks else: diff --git a/paddlespeech/cli/st/infer.py b/paddlespeech/cli/st/infer.py index e1ce181af351c4bf651a913d2de7005c5dc37e51..4e099c4021eca94bfea64eaefcd66267136eada1 100644 --- a/paddlespeech/cli/st/infer.py +++ b/paddlespeech/cli/st/infer.py @@ -26,10 +26,10 @@ import soundfile from kaldiio import WriteHelper from yacs.config import CfgNode +from ...utils.env import MODEL_HOME from ..executor import BaseExecutor from ..log import logger from ..utils import download_and_decompress -from ..utils import MODEL_HOME from ..utils import stats_wrapper from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer from paddlespeech.s2t.utils.utility import UpdateConfig diff --git a/paddlespeech/cli/utils.py b/paddlespeech/cli/utils.py index 21c887e997c6e294431654b35b747bab44cffa78..60f56f424f1663eed4765660574ce320ab9fa2d3 100644 --- a/paddlespeech/cli/utils.py +++ b/paddlespeech/cli/utils.py @@ -25,11 +25,12 @@ 
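The `add_blank` option threaded through `preprocess.sh`, `synthesize_e2e.sh`, and `run.sh` above follows the usual VITS convention of interspersing a blank token between phoneme ids. As a minimal sketch of what such a flag typically toggles (the helper name and the blank id `0` are illustrative assumptions, not the repo's actual frontend code):

```python
from typing import List

def intersperse(phone_ids: List[int], blank_id: int = 0) -> List[int]:
    """Return phone_ids with blank_id inserted before, between, and after tokens."""
    result = [blank_id] * (len(phone_ids) * 2 + 1)
    result[1::2] = phone_ids  # odd positions take the original ids
    return result

# e.g. [5, 9, 13] -> [0, 5, 0, 9, 0, 13, 0]
assert intersperse([5, 9, 13]) == [0, 5, 0, 9, 0, 13, 0]
```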
from typing import Dict import paddle import requests +import soundfile as sf import yaml from paddle.framework import load -import paddlespeech.audio from . import download +from ..utils.env import CONF_HOME from .entry import commands try: from .. import __version__ @@ -161,38 +162,6 @@ def load_state_dict_from_url(url: str, path: str, md5: str=None) -> os.PathLike: return load(os.path.join(path, os.path.basename(url))) -def _get_user_home(): - return os.path.expanduser('~') - - -def _get_paddlespcceh_home(): - if 'PPSPEECH_HOME' in os.environ: - home_path = os.environ['PPSPEECH_HOME'] - if os.path.exists(home_path): - if os.path.isdir(home_path): - return home_path - else: - raise RuntimeError( - 'The environment variable PPSPEECH_HOME {} is not a directory.'. - format(home_path)) - else: - return home_path - return os.path.join(_get_user_home(), '.paddlespeech') - - -def _get_sub_home(directory): - home = os.path.join(_get_paddlespcceh_home(), directory) - if not os.path.exists(home): - os.makedirs(home) - return home - - -PPSPEECH_HOME = _get_paddlespcceh_home() -MODEL_HOME = _get_sub_home('models') -CONF_HOME = _get_sub_home('conf') -DATA_HOME = _get_sub_home('datasets') - - def _md5(text: str): '''Calculate the md5 value of the input text.''' md5code = hashlib.md5(text.encode()) @@ -282,7 +251,8 @@ def _note_one_stat(cls_name, params={}): if 'audio_file' in params: try: - _, sr = paddlespeech.audio.load(params['audio_file']) + # recursive import cased by: utils.DATA_HOME + _, sr = sf.read(params['audio_file']) except Exception: sr = -1 diff --git a/paddlespeech/cls/models/panns/panns.py b/paddlespeech/cls/models/panns/panns.py index 4befe7aa4fe06682c17ea0264a18aee728dbb1fc..37deae80c847757d716acf123f037b60d7fd3d91 100644 --- a/paddlespeech/cls/models/panns/panns.py +++ b/paddlespeech/cls/models/panns/panns.py @@ -16,8 +16,8 @@ import os import paddle.nn as nn import paddle.nn.functional as F -from paddlespeech.audio.utils import MODEL_HOME from paddlespeech.audio.utils.download import load_state_dict_from_url +from paddlespeech.utils.env import MODEL_HOME __all__ = ['CNN14', 'CNN10', 'CNN6', 'cnn14', 'cnn10', 'cnn6'] diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py index f0a6ef31a93720f7d2ae9617d3ad053cdbc87f73..37303331bd82e7ba348246adf346f0d10809fd99 100644 --- a/paddlespeech/resource/pretrained_models.py +++ b/paddlespeech/resource/pretrained_models.py @@ -135,15 +135,21 @@ asr_dynamic_pretrained_models = { }, }, "deepspeech2online_wenetspeech-zh-16k": { - '1.0': { + '1.0.3': { 'url': - 'https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr0/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.2.model.tar.gz', + 'http://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr0/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.3.model.tar.gz', 'md5': - 'b0c77e7f8881e0a27b82127d1abb8d5f', + 'cfe273793e68f790f742b411c98bc75e', 'cfg_path': 'model.yaml', 'ckpt_path': 'exp/deepspeech2_online/checkpoints/avg_10', + 'model': + 'exp/deepspeech2_online/checkpoints/avg_10.jit.pdmodel', + 'params': + 'exp/deepspeech2_online/checkpoints/avg_10.jit.pdiparams', + 'onnx_model': + 'onnx/model.onnx', 'lm_url': 'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', 'lm_md5': @@ -170,14 +176,22 @@ asr_dynamic_pretrained_models = { '1.0.2': { 'url': 'http://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_fbank161_ckpt_1.0.2.model.tar.gz', - 'md5': '4dd42cfce9aaa54db0ec698da6c48ec5', - 'cfg_path': 'model.yaml', - 
'ckpt_path':'exp/deepspeech2_online/checkpoints/avg_1', - 'model':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel', - 'params':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams', - 'onnx_model': 'onnx/model.onnx', - 'lm_url':'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', - 'lm_md5':'29e02312deb2e59b3c8686c7966d4fe3' + 'md5': + '4dd42cfce9aaa54db0ec698da6c48ec5', + 'cfg_path': + 'model.yaml', + 'ckpt_path': + 'exp/deepspeech2_online/checkpoints/avg_1', + 'model': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel', + 'params': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams', + 'onnx_model': + 'onnx/model.onnx', + 'lm_url': + 'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', + 'lm_md5': + '29e02312deb2e59b3c8686c7966d4fe3' }, }, "deepspeech2offline_librispeech-en-16k": { @@ -220,35 +234,112 @@ asr_static_pretrained_models = { } }, "deepspeech2online_aishell-zh-16k": { + '1.0.1': { + 'url': + 'https://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_fbank161_ckpt_1.0.1.model.tar.gz', + 'md5': + 'df5ddeac8b679a470176649ac4b78726', + 'cfg_path': + 'model.yaml', + 'ckpt_path': + 'exp/deepspeech2_online/checkpoints/avg_1', + 'model': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel', + 'params': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams', + 'lm_url': + 'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', + 'lm_md5': + '29e02312deb2e59b3c8686c7966d4fe3' + }, '1.0.2': { 'url': 'http://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_fbank161_ckpt_1.0.2.model.tar.gz', - 'md5': '4dd42cfce9aaa54db0ec698da6c48ec5', - 'cfg_path': 'model.yaml', - 'ckpt_path':'exp/deepspeech2_online/checkpoints/avg_1', - 'model':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel', - 'params':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams', - 'onnx_model': 'onnx/model.onnx', - 'lm_url':'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', - 'lm_md5':'29e02312deb2e59b3c8686c7966d4fe3' + 'md5': + '4dd42cfce9aaa54db0ec698da6c48ec5', + 'cfg_path': + 'model.yaml', + 'ckpt_path': + 'exp/deepspeech2_online/checkpoints/avg_1', + 'model': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel', + 'params': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams', + 'onnx_model': + 'onnx/model.onnx', + 'lm_url': + 'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', + 'lm_md5': + '29e02312deb2e59b3c8686c7966d4fe3' + }, + }, + "deepspeech2online_wenetspeech-zh-16k": { + '1.0.3': { + 'url': + 'http://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr0/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.3.model.tar.gz', + 'md5': + 'cfe273793e68f790f742b411c98bc75e', + 'cfg_path': + 'model.yaml', + 'ckpt_path': + 'exp/deepspeech2_online/checkpoints/avg_10', + 'model': + 'exp/deepspeech2_online/checkpoints/avg_10.jit.pdmodel', + 'params': + 'exp/deepspeech2_online/checkpoints/avg_10.jit.pdiparams', + 'onnx_model': + 'onnx/model.onnx', + 'lm_url': + 'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', + 'lm_md5': + '29e02312deb2e59b3c8686c7966d4fe3' }, }, } - asr_onnx_pretrained_models = { - "deepspeech2online_aishell-zh-16k": { + "deepspeech2online_aishell-zh-16k": { '1.0.2': { 'url': 'http://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_fbank161_ckpt_1.0.2.model.tar.gz', - 'md5': '4dd42cfce9aaa54db0ec698da6c48ec5', - 'cfg_path': 'model.yaml', - 
'ckpt_path':'exp/deepspeech2_online/checkpoints/avg_1', - 'model':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel', - 'params':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams', - 'onnx_model': 'onnx/model.onnx', - 'lm_url':'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', - 'lm_md5':'29e02312deb2e59b3c8686c7966d4fe3' + 'md5': + '4dd42cfce9aaa54db0ec698da6c48ec5', + 'cfg_path': + 'model.yaml', + 'ckpt_path': + 'exp/deepspeech2_online/checkpoints/avg_1', + 'model': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel', + 'params': + 'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams', + 'onnx_model': + 'onnx/model.onnx', + 'lm_url': + 'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', + 'lm_md5': + '29e02312deb2e59b3c8686c7966d4fe3' + }, + }, + "deepspeech2online_wenetspeech-zh-16k": { + '1.0.3': { + 'url': + 'http://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr0/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.3.model.tar.gz', + 'md5': + 'cfe273793e68f790f742b411c98bc75e', + 'cfg_path': + 'model.yaml', + 'ckpt_path': + 'exp/deepspeech2_online/checkpoints/avg_10', + 'model': + 'exp/deepspeech2_online/checkpoints/avg_10.jit.pdmodel', + 'params': + 'exp/deepspeech2_online/checkpoints/avg_10.jit.pdiparams', + 'onnx_model': + 'onnx/model.onnx', + 'lm_url': + 'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm', + 'lm_md5': + '29e02312deb2e59b3c8686c7966d4fe3' + }, + }, + } diff --git a/paddlespeech/resource/resource.py b/paddlespeech/resource/resource.py index 369dba900079b4bf8591c1862d46a1d7b3898d74..45707eb442615c1f9de87ebc6443e518deb8788f 100644 --- a/paddlespeech/resource/resource.py +++ b/paddlespeech/resource/resource.py @@ -18,8 +18,8 @@ from typing import List from typing import Optional from ..cli.utils import download_and_decompress -from ..cli.utils import MODEL_HOME from ..utils.dynamic_import import dynamic_import +from ..utils.env import MODEL_HOME from .model_alias import model_alias task_supported = ['asr', 'cls', 'st', 'text', 'tts', 'vector'] @@ -164,9 +164,9 @@ class CommonTaskResource: try: import_models = '{}_{}_pretrained_models'.format(self.task, self.model_format) exec('from .pretrained_models import {}'.format(import_models)) models = OrderedDict(locals()[import_models]) - except ImportError: + except Exception: models = OrderedDict({}) # no models. finally: return models
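The `exec`-based lookup in `CommonTaskResource` above can be written without `exec`; a sketch of the same behavior using `importlib` (the function name `get_pretrained_models` is invented for illustration, and the broad `except` mirrors the patch's switch from `ImportError` to `Exception`):

```python
import importlib
from collections import OrderedDict

def get_pretrained_models(task: str, model_format: str) -> OrderedDict:
    """Look up e.g. asr_onnx_pretrained_models in paddlespeech.resource.pretrained_models."""
    name = f"{task}_{model_format}_pretrained_models"
    try:
        module = importlib.import_module(
            ".pretrained_models", package="paddlespeech.resource")
        return OrderedDict(getattr(module, name))
    except Exception:  # no dict registered for this task/format pair
        return OrderedDict()
```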
diff --git a/paddlespeech/server/bin/paddlespeech_server.py b/paddlespeech/server/bin/paddlespeech_server.py index 11f50655f73a85df76a05abd080ee2ce41985ce7..175e8ffb66fe6d6dabbe07120edf1d5286c8316e 100644 --- a/paddlespeech/server/bin/paddlespeech_server.py +++ b/paddlespeech/server/bin/paddlespeech_server.py @@ -45,7 +45,6 @@ app.add_middleware( allow_methods=["*"], allow_headers=["*"]) - @cli_server_register( name='paddlespeech_server.start', description='Start the service') class ServerExecutor(BaseExecutor): diff --git a/paddlespeech/server/conf/ws_ds2_application.yaml b/paddlespeech/server/conf/ws_ds2_application.yaml new file mode 100644 index 0000000000000000000000000000000000000000..909c2f187b4855c0c4620f7e89a557dd5f3a28a1 --- /dev/null +++ b/paddlespeech/server/conf/ws_ds2_application.yaml @@ -0,0 +1,84 @@ +# This is the parameter configuration file for PaddleSpeech Serving. + +################################################################################# +# SERVER SETTING # +################################################################################# +host: 0.0.0.0 +port: 8090 + +# The task format in the engine_list is: <speech task>_<engine type> +# task choices = ['asr_online-inference', 'asr_online-onnx'] +# protocol = ['websocket'] (only one can be selected). +# websocket only supports online engine type. +protocol: 'websocket' +engine_list: ['asr_online-onnx'] + + +################################################################################# +# ENGINE CONFIG # +################################################################################# + +################################### ASR ######################################### +################### speech task: asr; engine_type: online-inference ####################### +asr_online-inference: + model_type: 'deepspeech2online_wenetspeech' + am_model: # the pdmodel file of am static model [optional] + am_params: # the pdiparams file of am static model [optional] + lang: 'zh' + sample_rate: 16000 + cfg_path: + decode_method: + num_decoding_left_chunks: + force_yes: True + device: 'cpu' # cpu or gpu:id + + am_predictor_conf: + device: # set 'gpu:id' or 'cpu' + switch_ir_optim: True + glog_info: False # True -> print glog + summary: True # False -> do not show predictor config + + chunk_buffer_conf: + frame_duration_ms: 80 + shift_ms: 40 + sample_rate: 16000 + sample_width: 2 + window_n: 7 # frame + shift_n: 4 # frame + window_ms: 25 # ms + shift_ms: 10 # ms + + + +################################### ASR ######################################### +################### speech task: asr; engine_type: online-onnx ####################### +asr_online-onnx: + model_type: 'deepspeech2online_wenetspeech' + am_model: # the pdmodel file of onnx am static model [optional] + am_params: # the pdiparams file of am static model [optional] + lang: 'zh' + sample_rate: 16000 + cfg_path: + decode_method: + num_decoding_left_chunks: + force_yes: True + device: 'cpu' # cpu or gpu:id + + # https://onnxruntime.ai/docs/api/python/api_summary.html#inferencesession + am_predictor_conf: + device: 'cpu' # set 'gpu:id' or 'cpu' + graph_optimization_level: 0 + intra_op_num_threads: 0 # Sets the number of threads used to parallelize the execution within nodes.
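+    # note: a thread-count value of 0 leaves the choice to onnxruntime's defaults.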
+ inter_op_num_threads: 0 # Sets the number of threads used to parallelize the execution of the graph (across nodes). + log_severity_level: 2 # Log severity level. Applies to session load, initialization, etc. 0:Verbose, 1:Info, 2:Warning. 3:Error, 4:Fatal. Default is 2. + log_verbosity_level: 0 # VLOG level if DEBUG build and session_log_severity_level is 0. Applies to session load, initialization, etc. Default is 0. + + chunk_buffer_conf: + frame_duration_ms: 85 + shift_ms: 40 + sample_rate: 16000 + sample_width: 2 + window_n: 7 # frame + shift_n: 4 # frame + window_ms: 25 # ms + shift_ms: 10 # ms diff --git a/paddlespeech/server/engine/asr/online/onnx/__init__.py b/paddlespeech/server/engine/asr/online/onnx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97043fd7ba6885aac81cad5a49924c23c67d4d47 --- /dev/null +++ b/paddlespeech/server/engine/asr/online/onnx/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/server/engine/asr/online/onnx/asr_engine.py b/paddlespeech/server/engine/asr/online/onnx/asr_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..0679316437f8b050773c69986430640bbc83a6dd --- /dev/null +++ b/paddlespeech/server/engine/asr/online/onnx/asr_engine.py @@ -0,0 +1,530 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
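The onnx engine below obtains its runtime session from `paddlespeech.server.utils.onnx_infer.get_sess`, whose body is not part of this patch. Presumably it maps the `am_predictor_conf` keys from the yaml above onto `onnxruntime` session options, roughly like this sketch (`make_session` is a made-up name):

```python
import onnxruntime as ort

def make_session(model_path: str, conf: dict) -> ort.InferenceSession:
    so = ort.SessionOptions()
    so.intra_op_num_threads = conf.get("intra_op_num_threads", 0)
    so.inter_op_num_threads = conf.get("inter_op_num_threads", 0)
    # graph_optimization_level: 0 in the yaml ~ disable all graph optimizations
    levels = {
        0: ort.GraphOptimizationLevel.ORT_DISABLE_ALL,
        1: ort.GraphOptimizationLevel.ORT_ENABLE_BASIC,
        2: ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED,
        99: ort.GraphOptimizationLevel.ORT_ENABLE_ALL,
    }
    so.graph_optimization_level = levels.get(
        conf.get("graph_optimization_level", 0),
        ort.GraphOptimizationLevel.ORT_ENABLE_ALL)
    so.log_severity_level = conf.get("log_severity_level", 2)
    providers = ["CPUExecutionProvider"]  # device: 'cpu' in the yaml above
    return ort.InferenceSession(model_path, sess_options=so, providers=providers)
```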
+import os +import sys +from typing import ByteString +from typing import Optional + +import numpy as np +import paddle +from numpy import float32 +from yacs.config import CfgNode + +from paddlespeech.cli.asr.infer import ASRExecutor +from paddlespeech.cli.log import logger +from paddlespeech.resource import CommonTaskResource +from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer +from paddlespeech.s2t.modules.ctc import CTCDecoder +from paddlespeech.s2t.transform.transformation import Transformation +from paddlespeech.s2t.utils.utility import UpdateConfig +from paddlespeech.server.engine.base_engine import BaseEngine +from paddlespeech.server.utils import onnx_infer +from paddlespeech.utils.env import MODEL_HOME + +__all__ = ['PaddleASRConnectionHanddler', 'ASRServerExecutor', 'ASREngine'] + + +# ASR server connection process class +class PaddleASRConnectionHanddler: + def __init__(self, asr_engine): + """Init a Paddle ASR Connection Handler instance + + Args: + asr_engine (ASREngine): the global asr engine + """ + super().__init__() + logger.info( + "create an paddle asr connection handler to process the websocket connection" + ) + self.config = asr_engine.config # server config + self.model_config = asr_engine.executor.config + self.asr_engine = asr_engine + + # model_type, sample_rate and text_feature is shared for deepspeech2 and conformer + self.model_type = self.asr_engine.executor.model_type + self.sample_rate = self.asr_engine.executor.sample_rate + # tokens to text + self.text_feature = self.asr_engine.executor.text_feature + + # extract feat, new only fbank in conformer model + self.preprocess_conf = self.model_config.preprocess_config + self.preprocess_args = {"train": False} + self.preprocessing = Transformation(self.preprocess_conf) + + # frame window and frame shift, in samples unit + self.win_length = self.preprocess_conf.process[0]['win_length'] + self.n_shift = self.preprocess_conf.process[0]['n_shift'] + + assert self.preprocess_conf.process[0]['fs'] == self.sample_rate, ( + self.sample_rate, self.preprocess_conf.process[0]['fs']) + self.frame_shift_in_ms = int( + self.n_shift / self.preprocess_conf.process[0]['fs'] * 1000) + + self.continuous_decoding = self.config.get("continuous_decoding", False) + self.init_decoder() + self.reset() + + def init_decoder(self): + if "deepspeech2" in self.model_type: + assert self.continuous_decoding is False, "ds2 model not support endpoint" + self.am_predictor = self.asr_engine.executor.am_predictor + + self.decoder = CTCDecoder( + odim=self.model_config.output_dim, # is in vocab + enc_n_units=self.model_config.rnn_layer_size * 2, + blank_id=self.model_config.blank_id, + dropout_rate=0.0, + reduction=True, # sum + batch_average=True, # sum / batch_size + grad_norm_type=self.model_config.get('ctc_grad_norm_type', + None)) + + cfg = self.model_config.decode + decode_batch_size = 1 # for online + self.decoder.init_decoder( + decode_batch_size, self.text_feature.vocab_list, + cfg.decoding_method, cfg.lang_model_path, cfg.alpha, cfg.beta, + cfg.beam_size, cfg.cutoff_prob, cfg.cutoff_top_n, + cfg.num_proc_bsearch) + else: + raise ValueError(f"Not supported: {self.model_type}") + + def model_reset(self): + # cache for audio and feat + self.remained_wav = None + self.cached_feat = None + + def output_reset(self): + ## outputs + # partial/ending decoding results + self.result_transcripts = [''] + + def reset_continuous_decoding(self): + """ + when in continous decoding, reset for next utterance. 
+ """ + self.global_frame_offset = self.num_frames + self.model_reset() + + def reset(self): + if "deepspeech2" in self.model_type: + # for deepspeech2 + # init state + self.chunk_state_h_box = np.zeros( + (self.model_config.num_rnn_layers, 1, + self.model_config.rnn_layer_size), + dtype=float32) + self.chunk_state_c_box = np.zeros( + (self.model_config.num_rnn_layers, 1, + self.model_config.rnn_layer_size), + dtype=float32) + self.decoder.reset_decoder(batch_size=1) + else: + raise NotImplementedError(f"{self.model_type} not support.") + + self.device = None + + ## common + # global sample and frame step + self.num_samples = 0 + self.global_frame_offset = 0 + # frame step of cur utterance + self.num_frames = 0 + + ## endpoint + self.endpoint_state = False # True for detect endpoint + + ## conformer + self.model_reset() + + ## outputs + self.output_reset() + + def extract_feat(self, samples: ByteString): + logger.info("Online ASR extract the feat") + samples = np.frombuffer(samples, dtype=np.int16) + assert samples.ndim == 1 + + self.num_samples += samples.shape[0] + logger.info( + f"This package receive {samples.shape[0]} pcm data. Global samples:{self.num_samples}" + ) + + # self.reamined_wav stores all the samples, + # include the original remained_wav and this package samples + if self.remained_wav is None: + self.remained_wav = samples + else: + assert self.remained_wav.ndim == 1 # (T,) + self.remained_wav = np.concatenate([self.remained_wav, samples]) + logger.info( + f"The concatenation of remain and now audio samples length is: {self.remained_wav.shape}" + ) + + if len(self.remained_wav) < self.win_length: + # samples not enough for feature window + return 0 + + # fbank + x_chunk = self.preprocessing(self.remained_wav, **self.preprocess_args) + x_chunk = paddle.to_tensor(x_chunk, dtype="float32").unsqueeze(axis=0) + + # feature cache + if self.cached_feat is None: + self.cached_feat = x_chunk + else: + assert (len(x_chunk.shape) == 3) # (B,T,D) + assert (len(self.cached_feat.shape) == 3) # (B,T,D) + self.cached_feat = paddle.concat( + [self.cached_feat, x_chunk], axis=1) + + # set the feat device + if self.device is None: + self.device = self.cached_feat.place + + # cur frame step + num_frames = x_chunk.shape[1] + + # global frame step + self.num_frames += num_frames + + # update remained wav + self.remained_wav = self.remained_wav[self.n_shift * num_frames:] + + logger.info( + f"process the audio feature success, the cached feat shape: {self.cached_feat.shape}" + ) + logger.info( + f"After extract feat, the cached remain the audio samples: {self.remained_wav.shape}" + ) + logger.info(f"global samples: {self.num_samples}") + logger.info(f"global frames: {self.num_frames}") + + def decode(self, is_finished=False): + """advance decoding + + Args: + is_finished (bool, optional): Is last frame or not. Defaults to False. + + Returns: + None: + """ + if "deepspeech2" in self.model_type: + decoding_chunk_size = 1 # decoding chunk size = 1. 
int decoding frame unit + + context = 7 # context=7, in audio frame unit + subsampling = 4 # subsampling=4, in audio frame unit + + cached_feature_num = context - subsampling + # decoding window for model, in audio frame unit + decoding_window = (decoding_chunk_size - 1) * subsampling + context + # decoding stride for model, in audio frame unit + stride = subsampling * decoding_chunk_size + + if self.cached_feat is None: + logger.info("no audio feat, please input more pcm data") + return + + num_frames = self.cached_feat.shape[1] + logger.info( + f"Required decoding window {decoding_window} frames, and the connection has {num_frames} frames" + ) + + # the cached feat must be larger than decoding_window + if num_frames < decoding_window and not is_finished: + logger.info( + f"frame feat num is less than {decoding_window}, please input more pcm data" + ) + return None, None + + # if is_finished=True, we need at least context frames + if num_frames < context: + logger.info( + f"last {num_frames} is less than context {context} frames, and we cannot do model forward" + ) + return None, None + + logger.info("start to do model forward") + # num_frames - context + 1 ensure that current frame can get context window + if is_finished: + # if get the finished chunk, we need process the last context + left_frames = context + else: + # we only process decoding_window frames for one chunk + left_frames = decoding_window + + end = None + for cur in range(0, num_frames - left_frames + 1, stride): + end = min(cur + decoding_window, num_frames) + + # extract the audio + x_chunk = self.cached_feat[:, cur:end, :].numpy() + x_chunk_lens = np.array([x_chunk.shape[1]]) + + trans_best = self.decode_one_chunk(x_chunk, x_chunk_lens) + + self.result_transcripts = [trans_best] + + # update feat cache + self.cached_feat = self.cached_feat[:, end - cached_feature_num:, :] + + # return trans_best[0] + else: + raise Exception(f"{self.model_type} not support paddleinference.") + + @paddle.no_grad() + def decode_one_chunk(self, x_chunk, x_chunk_lens): + """forward one chunk frames + + Args: + x_chunk (np.ndarray): (B,T,D), audio frames. + x_chunk_lens ([type]): (B,), audio frame lens + + Returns: + logprob: posterior probability. + """ + logger.info("start to decode one chunk for deepspeech2") + # state_c, state_h, audio_lens, audio + # 'chunk_state_c_box', 'chunk_state_h_box', 'audio_chunk_lens', 'audio_chunk' + input_names = [n.name for n in self.am_predictor.get_inputs()] + logger.info(f"ort inputs: {input_names}") + # 'softmax_0.tmp_0', 'tmp_5', 'concat_0.tmp_0', 'concat_1.tmp_0' + # audio, audio_lens, state_h, state_c + output_names = [n.name for n in self.am_predictor.get_outputs()] + logger.info(f"ort outputs: {output_names}") + assert (len(input_names) == len(output_names)) + assert isinstance(input_names[0], str) + + input_datas = [ + self.chunk_state_c_box, self.chunk_state_h_box, x_chunk_lens, + x_chunk + ] + feeds = dict(zip(input_names, input_datas)) + + outputs = self.am_predictor.run([*output_names], {**feeds}) + + output_chunk_probs, output_chunk_lens, self.chunk_state_h_box, self.chunk_state_c_box = outputs + self.decoder.next(output_chunk_probs, output_chunk_lens) + trans_best, trans_beam = self.decoder.decode() + logger.info(f"decode one best result for deepspeech2: {trans_best[0]}") + return trans_best[0] + + def get_result(self): + """return partial/ending asr result. + + Returns: + str: one best result of partial/ending. 
+ """ + if len(self.result_transcripts) > 0: + return self.result_transcripts[0] + else: + return '' + + def get_word_time_stamp(self): + return [] + + @paddle.no_grad() + def rescoring(self): + ... + + +class ASRServerExecutor(ASRExecutor): + def __init__(self): + super().__init__() + self.task_resource = CommonTaskResource( + task='asr', model_format='onnx', inference_mode='online') + + def update_config(self) -> None: + if "deepspeech2" in self.model_type: + with UpdateConfig(self.config): + # download lm + self.config.decode.lang_model_path = os.path.join( + MODEL_HOME, 'language_model', + self.config.decode.lang_model_path) + + lm_url = self.task_resource.res_dict['lm_url'] + lm_md5 = self.task_resource.res_dict['lm_md5'] + logger.info(f"Start to load language model {lm_url}") + self.download_lm( + lm_url, + os.path.dirname(self.config.decode.lang_model_path), lm_md5) + else: + raise NotImplementedError( + f"{self.model_type} not support paddleinference.") + + def init_model(self) -> None: + + if "deepspeech2" in self.model_type: + # AM predictor + logger.info("ASR engine start to init the am predictor") + self.am_predictor = onnx_infer.get_sess( + model_path=self.am_model, sess_conf=self.am_predictor_conf) + else: + raise NotImplementedError( + f"{self.model_type} not support paddleinference.") + + def _init_from_path(self, + model_type: str=None, + am_model: Optional[os.PathLike]=None, + am_params: Optional[os.PathLike]=None, + lang: str='zh', + sample_rate: int=16000, + cfg_path: Optional[os.PathLike]=None, + decode_method: str='attention_rescoring', + num_decoding_left_chunks: int=-1, + am_predictor_conf: dict=None): + """ + Init model and other resources from a specific path. + """ + if not model_type or not lang or not sample_rate: + logger.error( + "The model type or lang or sample rate is None, please input an valid server parameter yaml" + ) + return False + assert am_params is None, "am_params not used in onnx engine" + + self.model_type = model_type + self.sample_rate = sample_rate + self.decode_method = decode_method + self.num_decoding_left_chunks = num_decoding_left_chunks + # conf for paddleinference predictor or onnx + self.am_predictor_conf = am_predictor_conf + logger.info(f"model_type: {self.model_type}") + + sample_rate_str = '16k' if sample_rate == 16000 else '8k' + tag = model_type + '-' + lang + '-' + sample_rate_str + self.task_resource.set_task_model(model_tag=tag) + + if cfg_path is None: + self.res_path = self.task_resource.res_dir + self.cfg_path = os.path.join( + self.res_path, self.task_resource.res_dict['cfg_path']) + else: + self.cfg_path = os.path.abspath(cfg_path) + self.res_path = os.path.dirname( + os.path.dirname(os.path.abspath(self.cfg_path))) + + self.am_model = os.path.join(self.res_path, self.task_resource.res_dict[ + 'onnx_model']) if am_model is None else os.path.abspath(am_model) + + # self.am_params = os.path.join( + # self.res_path, self.task_resource.res_dict[ + # 'params']) if am_params is None else os.path.abspath(am_params) + + logger.info("Load the pretrained model:") + logger.info(f" tag = {tag}") + logger.info(f" res_path: {self.res_path}") + logger.info(f" cfg path: {self.cfg_path}") + logger.info(f" am_model path: {self.am_model}") + # logger.info(f" am_params path: {self.am_params}") + + #Init body. 
+ self.config = CfgNode(new_allowed=True) + self.config.merge_from_file(self.cfg_path) + + if self.config.spm_model_prefix: + self.config.spm_model_prefix = os.path.join( + self.res_path, self.config.spm_model_prefix) + logger.info(f"spm model path: {self.config.spm_model_prefix}") + + self.vocab = self.config.vocab_filepath + + self.text_feature = TextFeaturizer( + unit_type=self.config.unit_type, + vocab=self.config.vocab_filepath, + spm_model_prefix=self.config.spm_model_prefix) + + self.update_config() + + # AM predictor + self.init_model() + + logger.info(f"create the {model_type} model success") + return True + + +class ASREngine(BaseEngine): + """ASR model resource + + Args: + metaclass: Defaults to Singleton. + """ + + def __init__(self): + super(ASREngine, self).__init__() + + def init_model(self) -> bool: + if not self.executor._init_from_path( + model_type=self.config.model_type, + am_model=self.config.am_model, + am_params=self.config.am_params, + lang=self.config.lang, + sample_rate=self.config.sample_rate, + cfg_path=self.config.cfg_path, + decode_method=self.config.decode_method, + num_decoding_left_chunks=self.config.num_decoding_left_chunks, + am_predictor_conf=self.config.am_predictor_conf): + return False + return True + + def init(self, config: dict) -> bool: + """init engine resource + + Args: + config_file (str): config file + + Returns: + bool: init failed or success + """ + self.config = config + self.executor = ASRServerExecutor() + + try: + self.device = self.config.get("device", paddle.get_device()) + paddle.set_device(self.device) + except BaseException as e: + logger.error( + f"Set device failed, please check if device '{self.device}' is already used and the parameter 'device' in the yaml file" + ) + logger.error( + "If all GPU or XPU is used, you can set the server to 'cpu'") + sys.exit(-1) + + logger.info(f"paddlespeech_server set the device: {self.device}") + + if not self.init_model(): + logger.error( + "Init the ASR server occurs error, please check the server configuration yaml" + ) + return False + + logger.info("Initialize ASR server engine successfully.") + return True + + def new_handler(self): + """New handler from model. + + Returns: + PaddleASRConnectionHanddler: asr handler instance + """ + return PaddleASRConnectionHanddler(self) + + def preprocess(self, *args, **kwargs): + raise NotImplementedError("Online not using this.") + + def run(self, *args, **kwargs): + raise NotImplementedError("Online not using this.") + + def postprocess(self): + raise NotImplementedError("Online not using this.") diff --git a/paddlespeech/server/engine/asr/online/paddleinference/__init__.py b/paddlespeech/server/engine/asr/online/paddleinference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97043fd7ba6885aac81cad5a49924c23c67d4d47 --- /dev/null +++ b/paddlespeech/server/engine/asr/online/paddleinference/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/server/engine/asr/online/paddleinference/asr_engine.py b/paddlespeech/server/engine/asr/online/paddleinference/asr_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..efb726aaff3813a456cfac37f641a057fd2d5ed6 --- /dev/null +++ b/paddlespeech/server/engine/asr/online/paddleinference/asr_engine.py @@ -0,0 +1,545 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +from typing import ByteString +from typing import Optional + +import numpy as np +import paddle +from numpy import float32 +from yacs.config import CfgNode + +from paddlespeech.cli.asr.infer import ASRExecutor +from paddlespeech.cli.log import logger +from paddlespeech.resource import CommonTaskResource +from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer +from paddlespeech.s2t.modules.ctc import CTCDecoder +from paddlespeech.s2t.transform.transformation import Transformation +from paddlespeech.s2t.utils.utility import UpdateConfig +from paddlespeech.server.engine.base_engine import BaseEngine +from paddlespeech.server.utils.paddle_predictor import init_predictor +from paddlespeech.utils.env import MODEL_HOME + +__all__ = ['PaddleASRConnectionHanddler', 'ASRServerExecutor', 'ASREngine'] + + +# ASR server connection process class +class PaddleASRConnectionHanddler: + def __init__(self, asr_engine): + """Init a Paddle ASR Connection Handler instance + + Args: + asr_engine (ASREngine): the global asr engine + """ + super().__init__() + logger.info( + "create an paddle asr connection handler to process the websocket connection" + ) + self.config = asr_engine.config # server config + self.model_config = asr_engine.executor.config + self.asr_engine = asr_engine + + # model_type, sample_rate and text_feature is shared for deepspeech2 and conformer + self.model_type = self.asr_engine.executor.model_type + self.sample_rate = self.asr_engine.executor.sample_rate + # tokens to text + self.text_feature = self.asr_engine.executor.text_feature + + # extract feat, new only fbank in conformer model + self.preprocess_conf = self.model_config.preprocess_config + self.preprocess_args = {"train": False} + self.preprocessing = Transformation(self.preprocess_conf) + + # frame window and frame shift, in samples unit + self.win_length = self.preprocess_conf.process[0]['win_length'] + self.n_shift = self.preprocess_conf.process[0]['n_shift'] + + assert self.preprocess_conf.process[0]['fs'] == self.sample_rate, ( + self.sample_rate, self.preprocess_conf.process[0]['fs']) + self.frame_shift_in_ms = int( + self.n_shift / self.preprocess_conf.process[0]['fs'] * 1000) + + self.continuous_decoding = self.config.get("continuous_decoding", False) + self.init_decoder() + self.reset() + + def init_decoder(self): + if "deepspeech2" in self.model_type: + assert self.continuous_decoding is False, "ds2 
model not support endpoint" + self.am_predictor = self.asr_engine.executor.am_predictor + + self.decoder = CTCDecoder( + odim=self.model_config.output_dim, # is in vocab + enc_n_units=self.model_config.rnn_layer_size * 2, + blank_id=self.model_config.blank_id, + dropout_rate=0.0, + reduction=True, # sum + batch_average=True, # sum / batch_size + grad_norm_type=self.model_config.get('ctc_grad_norm_type', + None)) + + cfg = self.model_config.decode + decode_batch_size = 1 # for online + self.decoder.init_decoder( + decode_batch_size, self.text_feature.vocab_list, + cfg.decoding_method, cfg.lang_model_path, cfg.alpha, cfg.beta, + cfg.beam_size, cfg.cutoff_prob, cfg.cutoff_top_n, + cfg.num_proc_bsearch) + else: + raise ValueError(f"Not supported: {self.model_type}") + + def model_reset(self): + # cache for audio and feat + self.remained_wav = None + self.cached_feat = None + + def output_reset(self): + ## outputs + # partial/ending decoding results + self.result_transcripts = [''] + + def reset_continuous_decoding(self): + """ + when in continous decoding, reset for next utterance. + """ + self.global_frame_offset = self.num_frames + self.model_reset() + + def reset(self): + if "deepspeech2" in self.model_type: + # for deepspeech2 + # init state + self.chunk_state_h_box = np.zeros( + (self.model_config.num_rnn_layers, 1, + self.model_config.rnn_layer_size), + dtype=float32) + self.chunk_state_c_box = np.zeros( + (self.model_config.num_rnn_layers, 1, + self.model_config.rnn_layer_size), + dtype=float32) + self.decoder.reset_decoder(batch_size=1) + else: + raise NotImplementedError(f"{self.model_type} not support.") + + self.device = None + + ## common + # global sample and frame step + self.num_samples = 0 + self.global_frame_offset = 0 + # frame step of cur utterance + self.num_frames = 0 + + ## endpoint + self.endpoint_state = False # True for detect endpoint + + ## conformer + self.model_reset() + + ## outputs + self.output_reset() + + def extract_feat(self, samples: ByteString): + logger.info("Online ASR extract the feat") + samples = np.frombuffer(samples, dtype=np.int16) + assert samples.ndim == 1 + + self.num_samples += samples.shape[0] + logger.info( + f"This package receive {samples.shape[0]} pcm data. 
Global samples:{self.num_samples}" + ) + + # self.reamined_wav stores all the samples, + # include the original remained_wav and this package samples + if self.remained_wav is None: + self.remained_wav = samples + else: + assert self.remained_wav.ndim == 1 # (T,) + self.remained_wav = np.concatenate([self.remained_wav, samples]) + logger.info( + f"The concatenation of remain and now audio samples length is: {self.remained_wav.shape}" + ) + + if len(self.remained_wav) < self.win_length: + # samples not enough for feature window + return 0 + + # fbank + x_chunk = self.preprocessing(self.remained_wav, **self.preprocess_args) + x_chunk = paddle.to_tensor(x_chunk, dtype="float32").unsqueeze(axis=0) + + # feature cache + if self.cached_feat is None: + self.cached_feat = x_chunk + else: + assert (len(x_chunk.shape) == 3) # (B,T,D) + assert (len(self.cached_feat.shape) == 3) # (B,T,D) + self.cached_feat = paddle.concat( + [self.cached_feat, x_chunk], axis=1) + + # set the feat device + if self.device is None: + self.device = self.cached_feat.place + + # cur frame step + num_frames = x_chunk.shape[1] + + # global frame step + self.num_frames += num_frames + + # update remained wav + self.remained_wav = self.remained_wav[self.n_shift * num_frames:] + + logger.info( + f"process the audio feature success, the cached feat shape: {self.cached_feat.shape}" + ) + logger.info( + f"After extract feat, the cached remain the audio samples: {self.remained_wav.shape}" + ) + logger.info(f"global samples: {self.num_samples}") + logger.info(f"global frames: {self.num_frames}") + + def decode(self, is_finished=False): + """advance decoding + + Args: + is_finished (bool, optional): Is last frame or not. Defaults to False. + + Returns: + None: + """ + if "deepspeech2" in self.model_type: + decoding_chunk_size = 1 # decoding chunk size = 1. 
int decoding frame unit + + context = 7 # context=7, in audio frame unit + subsampling = 4 # subsampling=4, in audio frame unit + + cached_feature_num = context - subsampling + # decoding window for model, in audio frame unit + decoding_window = (decoding_chunk_size - 1) * subsampling + context + # decoding stride for model, in audio frame unit + stride = subsampling * decoding_chunk_size + + if self.cached_feat is None: + logger.info("no audio feat, please input more pcm data") + return + + num_frames = self.cached_feat.shape[1] + logger.info( + f"Required decoding window {decoding_window} frames, and the connection has {num_frames} frames" + ) + + # the cached feat must be larger than decoding_window + if num_frames < decoding_window and not is_finished: + logger.info( + f"frame feat num is less than {decoding_window}, please input more pcm data" + ) + return None, None + + # if is_finished=True, we need at least context frames + if num_frames < context: + logger.info( + f"last {num_frames} is less than context {context} frames, and we cannot do model forward" + ) + return None, None + + logger.info("start to do model forward") + # num_frames - context + 1 ensure that current frame can get context window + if is_finished: + # if get the finished chunk, we need process the last context + left_frames = context + else: + # we only process decoding_window frames for one chunk + left_frames = decoding_window + + end = None + for cur in range(0, num_frames - left_frames + 1, stride): + end = min(cur + decoding_window, num_frames) + + # extract the audio + x_chunk = self.cached_feat[:, cur:end, :].numpy() + x_chunk_lens = np.array([x_chunk.shape[1]]) + + trans_best = self.decode_one_chunk(x_chunk, x_chunk_lens) + + self.result_transcripts = [trans_best] + + # update feat cache + self.cached_feat = self.cached_feat[:, end - cached_feature_num:, :] + + # return trans_best[0] + else: + raise Exception(f"{self.model_type} not support paddleinference.") + + @paddle.no_grad() + def decode_one_chunk(self, x_chunk, x_chunk_lens): + """forward one chunk frames + + Args: + x_chunk (np.ndarray): (B,T,D), audio frames. + x_chunk_lens ([type]): (B,), audio frame lens + + Returns: + logprob: posterior probability. 
+ """ + logger.info("start to decoce one chunk for deepspeech2") + input_names = self.am_predictor.get_input_names() + audio_handle = self.am_predictor.get_input_handle(input_names[0]) + audio_len_handle = self.am_predictor.get_input_handle(input_names[1]) + h_box_handle = self.am_predictor.get_input_handle(input_names[2]) + c_box_handle = self.am_predictor.get_input_handle(input_names[3]) + + audio_handle.reshape(x_chunk.shape) + audio_handle.copy_from_cpu(x_chunk) + + audio_len_handle.reshape(x_chunk_lens.shape) + audio_len_handle.copy_from_cpu(x_chunk_lens) + + h_box_handle.reshape(self.chunk_state_h_box.shape) + h_box_handle.copy_from_cpu(self.chunk_state_h_box) + + c_box_handle.reshape(self.chunk_state_c_box.shape) + c_box_handle.copy_from_cpu(self.chunk_state_c_box) + + output_names = self.am_predictor.get_output_names() + output_handle = self.am_predictor.get_output_handle(output_names[0]) + output_lens_handle = self.am_predictor.get_output_handle( + output_names[1]) + output_state_h_handle = self.am_predictor.get_output_handle( + output_names[2]) + output_state_c_handle = self.am_predictor.get_output_handle( + output_names[3]) + + self.am_predictor.run() + + output_chunk_probs = output_handle.copy_to_cpu() + output_chunk_lens = output_lens_handle.copy_to_cpu() + self.chunk_state_h_box = output_state_h_handle.copy_to_cpu() + self.chunk_state_c_box = output_state_c_handle.copy_to_cpu() + + self.decoder.next(output_chunk_probs, output_chunk_lens) + trans_best, trans_beam = self.decoder.decode() + logger.info(f"decode one best result for deepspeech2: {trans_best[0]}") + return trans_best[0] + + def get_result(self): + """return partial/ending asr result. + + Returns: + str: one best result of partial/ending. + """ + if len(self.result_transcripts) > 0: + return self.result_transcripts[0] + else: + return '' + + def get_word_time_stamp(self): + return [] + + @paddle.no_grad() + def rescoring(self): + ... + + +class ASRServerExecutor(ASRExecutor): + def __init__(self): + super().__init__() + self.task_resource = CommonTaskResource( + task='asr', model_format='static', inference_mode='online') + + def update_config(self) -> None: + if "deepspeech2" in self.model_type: + with UpdateConfig(self.config): + # download lm + self.config.decode.lang_model_path = os.path.join( + MODEL_HOME, 'language_model', + self.config.decode.lang_model_path) + + lm_url = self.task_resource.res_dict['lm_url'] + lm_md5 = self.task_resource.res_dict['lm_md5'] + logger.info(f"Start to load language model {lm_url}") + self.download_lm( + lm_url, + os.path.dirname(self.config.decode.lang_model_path), lm_md5) + else: + raise NotImplementedError( + f"{self.model_type} not support paddleinference.") + + def init_model(self) -> None: + + if "deepspeech2" in self.model_type: + # AM predictor + logger.info("ASR engine start to init the am predictor") + self.am_predictor = init_predictor( + model_file=self.am_model, + params_file=self.am_params, + predictor_conf=self.am_predictor_conf) + else: + raise NotImplementedError( + f"{self.model_type} not support paddleinference.") + + def _init_from_path(self, + model_type: str=None, + am_model: Optional[os.PathLike]=None, + am_params: Optional[os.PathLike]=None, + lang: str='zh', + sample_rate: int=16000, + cfg_path: Optional[os.PathLike]=None, + decode_method: str='attention_rescoring', + num_decoding_left_chunks: int=-1, + am_predictor_conf: dict=None): + """ + Init model and other resources from a specific path. 
+ """ + if not model_type or not lang or not sample_rate: + logger.error( + "The model type or lang or sample rate is None, please input an valid server parameter yaml" + ) + return False + + self.model_type = model_type + self.sample_rate = sample_rate + self.decode_method = decode_method + self.num_decoding_left_chunks = num_decoding_left_chunks + # conf for paddleinference predictor or onnx + self.am_predictor_conf = am_predictor_conf + logger.info(f"model_type: {self.model_type}") + + sample_rate_str = '16k' if sample_rate == 16000 else '8k' + tag = model_type + '-' + lang + '-' + sample_rate_str + self.task_resource.set_task_model(model_tag=tag) + + if cfg_path is None or am_model is None or am_params is None: + self.res_path = self.task_resource.res_dir + self.cfg_path = os.path.join( + self.res_path, self.task_resource.res_dict['cfg_path']) + + self.am_model = os.path.join(self.res_path, + self.task_resource.res_dict['model']) + self.am_params = os.path.join(self.res_path, + self.task_resource.res_dict['params']) + else: + self.cfg_path = os.path.abspath(cfg_path) + self.am_model = os.path.abspath(am_model) + self.am_params = os.path.abspath(am_params) + self.res_path = os.path.dirname( + os.path.dirname(os.path.abspath(self.cfg_path))) + + logger.info("Load the pretrained model:") + logger.info(f" tag = {tag}") + logger.info(f" res_path: {self.res_path}") + logger.info(f" cfg path: {self.cfg_path}") + logger.info(f" am_model path: {self.am_model}") + logger.info(f" am_params path: {self.am_params}") + + #Init body. + self.config = CfgNode(new_allowed=True) + self.config.merge_from_file(self.cfg_path) + + if self.config.spm_model_prefix: + self.config.spm_model_prefix = os.path.join( + self.res_path, self.config.spm_model_prefix) + logger.info(f"spm model path: {self.config.spm_model_prefix}") + + self.vocab = self.config.vocab_filepath + + self.text_feature = TextFeaturizer( + unit_type=self.config.unit_type, + vocab=self.config.vocab_filepath, + spm_model_prefix=self.config.spm_model_prefix) + + self.update_config() + + # AM predictor + self.init_model() + + logger.info(f"create the {model_type} model success") + return True + + +class ASREngine(BaseEngine): + """ASR model resource + + Args: + metaclass: Defaults to Singleton. 
+ """ + + def __init__(self): + super(ASREngine, self).__init__() + + def init_model(self) -> bool: + if not self.executor._init_from_path( + model_type=self.config.model_type, + am_model=self.config.am_model, + am_params=self.config.am_params, + lang=self.config.lang, + sample_rate=self.config.sample_rate, + cfg_path=self.config.cfg_path, + decode_method=self.config.decode_method, + num_decoding_left_chunks=self.config.num_decoding_left_chunks, + am_predictor_conf=self.config.am_predictor_conf): + return False + return True + + def init(self, config: dict) -> bool: + """init engine resource + + Args: + config_file (str): config file + + Returns: + bool: init failed or success + """ + self.config = config + self.executor = ASRServerExecutor() + + try: + self.device = self.config.get("device", paddle.get_device()) + paddle.set_device(self.device) + except BaseException as e: + logger.error( + f"Set device failed, please check if device '{self.device}' is already used and the parameter 'device' in the yaml file" + ) + logger.error( + "If all GPU or XPU is used, you can set the server to 'cpu'") + sys.exit(-1) + + logger.info(f"paddlespeech_server set the device: {self.device}") + + if not self.init_model(): + logger.error( + "Init the ASR server occurs error, please check the server configuration yaml" + ) + return False + + logger.info("Initialize ASR server engine successfully.") + return True + + def new_handler(self): + """New handler from model. + + Returns: + PaddleASRConnectionHanddler: asr handler instance + """ + return PaddleASRConnectionHanddler(self) + + def preprocess(self, *args, **kwargs): + raise NotImplementedError("Online not using this.") + + def run(self, *args, **kwargs): + raise NotImplementedError("Online not using this.") + + def postprocess(self): + raise NotImplementedError("Online not using this.") diff --git a/paddlespeech/server/engine/asr/online/python/__init__.py b/paddlespeech/server/engine/asr/online/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97043fd7ba6885aac81cad5a49924c23c67d4d47 --- /dev/null +++ b/paddlespeech/server/engine/asr/online/python/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/paddlespeech/server/engine/asr/online/asr_engine.py b/paddlespeech/server/engine/asr/online/python/asr_engine.py
similarity index 96%
rename from paddlespeech/server/engine/asr/online/asr_engine.py
rename to paddlespeech/server/engine/asr/online/python/asr_engine.py
index cb6a42b745a8fad1e00fe0fef0265a6c4006f68c..daa9fc500d5092d63bbda449dc35f9f93559491a 100644
--- a/paddlespeech/server/engine/asr/online/asr_engine.py
+++ b/paddlespeech/server/engine/asr/online/python/asr_engine.py
@@ -23,7 +23,6 @@ from yacs.config import CfgNode
 
 from paddlespeech.cli.asr.infer import ASRExecutor
 from paddlespeech.cli.log import logger
-from paddlespeech.cli.utils import MODEL_HOME
 from paddlespeech.resource import CommonTaskResource
 from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
 from paddlespeech.s2t.modules.ctc import CTCDecoder
@@ -36,6 +35,7 @@ from paddlespeech.server.engine.asr.online.ctc_endpoint import OnlineCTCEndpoint
 from paddlespeech.server.engine.asr.online.ctc_search import CTCPrefixBeamSearch
 from paddlespeech.server.engine.base_engine import BaseEngine
 from paddlespeech.server.utils.paddle_predictor import init_predictor
+from paddlespeech.utils.env import MODEL_HOME
 
 __all__ = ['PaddleASRConnectionHanddler', 'ASRServerExecutor', 'ASREngine']
 
@@ -121,13 +121,13 @@
             raise ValueError(f"Not supported: {self.model_type}")
 
     def model_reset(self):
-        if "deepspeech2" in self.model_type:
-            return
-
         # cache for audio and feat
         self.remained_wav = None
         self.cached_feat = None
 
+        if "deepspeech2" in self.model_type:
+            return
+
         ## conformer
         # cache for conformer online
         self.subsampling_cache = None
@@ -697,6 +697,66 @@ class ASRServerExecutor(ASRExecutor):
         self.task_resource = CommonTaskResource(
             task='asr', model_format='dynamic', inference_mode='online')
 
+    def update_config(self) -> None:
+        if "deepspeech2" in self.model_type:
+            with UpdateConfig(self.config):
+                # download lm
+                self.config.decode.lang_model_path = os.path.join(
+                    MODEL_HOME, 'language_model',
+                    self.config.decode.lang_model_path)
+
+            lm_url = self.task_resource.res_dict['lm_url']
+            lm_md5 = self.task_resource.res_dict['lm_md5']
+            logger.info(f"Start to load language model {lm_url}")
+            self.download_lm(
+                lm_url,
+                os.path.dirname(self.config.decode.lang_model_path), lm_md5)
+        elif "conformer" in self.model_type or "transformer" in self.model_type:
+            with UpdateConfig(self.config):
+                logger.info("start to create the stream conformer asr engine")
+                # update the decoding method
+                if self.decode_method:
+                    self.config.decode.decoding_method = self.decode_method
+                # update num_decoding_left_chunks
+                if self.num_decoding_left_chunks:
+                    assert self.num_decoding_left_chunks == -1 or self.num_decoding_left_chunks >= 0, "num_decoding_left_chunks should be -1 or >=0"
+                    self.config.decode.num_decoding_left_chunks = self.num_decoding_left_chunks
+                # we only support ctc_prefix_beam_search and attention_rescoring decoding method
+                # Generally we set the decoding_method to attention_rescoring
+                if self.config.decode.decoding_method not in [
+                        "ctc_prefix_beam_search", "attention_rescoring"
+                ]:
+                    logger.info(
+                        "we set the decoding_method to attention_rescoring")
+                    self.config.decode.decoding_method = "attention_rescoring"
+
+                assert self.config.decode.decoding_method in [
+                    "ctc_prefix_beam_search", "attention_rescoring"
+                ], f"we only support ctc_prefix_beam_search and attention_rescoring decoding method, current decoding method is {self.config.decode.decoding_method}"
+        else:
+            raise Exception(f"not supported: {self.model_type}")
+
+    def init_model(self) -> None:
+        if "deepspeech2" in self.model_type:
+            # AM predictor
+            logger.info("ASR engine start to init the am predictor")
+            self.am_predictor = init_predictor(
+                model_file=self.am_model,
+                params_file=self.am_params,
+                predictor_conf=self.am_predictor_conf)
+        elif "conformer" in self.model_type or "transformer" in self.model_type:
+            # load model
+            # model_type: {model_name}_{dataset}
+            model_name = self.model_type[:self.model_type.rindex('_')]
+            logger.info(f"model name: {model_name}")
+            model_class = self.task_resource.get_model_class(model_name)
+            model = model_class.from_config(self.config)
+            self.model = model
+            self.model.set_state_dict(paddle.load(self.am_model))
+            self.model.eval()
+        else:
+            raise Exception(f"not supported: {self.model_type}")
+
     def _init_from_path(self,
                         model_type: str=None,
                         am_model: Optional[os.PathLike]=None,
@@ -718,6 +778,10 @@
 
         self.model_type = model_type
         self.sample_rate = sample_rate
+        self.decode_method = decode_method
+        self.num_decoding_left_chunks = num_decoding_left_chunks
+        # conf for paddleinference predictor or onnx
+        self.am_predictor_conf = am_predictor_conf
         logger.info(f"model_type: {self.model_type}")
 
         sample_rate_str = '16k' if sample_rate == 16000 else '8k'
@@ -763,63 +827,10 @@
                 vocab=self.config.vocab_filepath,
                 spm_model_prefix=self.config.spm_model_prefix)
 
-        if "deepspeech2" in model_type:
-            with UpdateConfig(self.config):
-                # download lm
-                self.config.decode.lang_model_path = os.path.join(
-                    MODEL_HOME, 'language_model',
-                    self.config.decode.lang_model_path)
+        self.update_config()
 
-                lm_url = self.task_resource.res_dict['lm_url']
-                lm_md5 = self.task_resource.res_dict['lm_md5']
-                logger.info(f"Start to load language model {lm_url}")
-                self.download_lm(
-                    lm_url,
-                    os.path.dirname(self.config.decode.lang_model_path), lm_md5)
-
-            # AM predictor
-            logger.info("ASR engine start to init the am predictor")
-            self.am_predictor_conf = am_predictor_conf
-            self.am_predictor = init_predictor(
-                model_file=self.am_model,
-                params_file=self.am_params,
-                predictor_conf=self.am_predictor_conf)
-
-        elif "conformer" in model_type or "transformer" in model_type:
-            with UpdateConfig(self.config):
-                logger.info("start to create the stream conformer asr engine")
-                # update the decoding method
-                if decode_method:
-                    self.config.decode.decoding_method = decode_method
-                # update num_decoding_left_chunks
-                if num_decoding_left_chunks:
-                    assert num_decoding_left_chunks == -1 or num_decoding_left_chunks >= 0, f"num_decoding_left_chunks should be -1 or >=0"
-                    self.config.decode.num_decoding_left_chunks = num_decoding_left_chunks
-
-                # we only support ctc_prefix_beam_search and attention_rescoring dedoding method
-                # Generally we set the decoding_method to attention_rescoring
-                if self.config.decode.decoding_method not in [
-                        "ctc_prefix_beam_search", "attention_rescoring"
-                ]:
-                    logger.info(
-                        "we set the decoding_method to attention_rescoring")
-                    self.config.decode.decoding_method = "attention_rescoring"
-
-                assert self.config.decode.decoding_method in [
-                    "ctc_prefix_beam_search", "attention_rescoring"
-                ], f"we only support ctc_prefix_beam_search and attention_rescoring dedoding method, current decoding method is {self.config.decode.decoding_method}"
-
-            # load model
-            model_name = model_type[:model_type.rindex(
-                '_')]  # model_type: {model_name}_{dataset}
-            logger.info(f"model name: {model_name}")
-
-            model_class = self.task_resource.get_model_class(model_name)
-            model = model_class.from_config(self.config)
-            self.model = model
-            self.model.set_state_dict(paddle.load(self.am_model))
-            self.model.eval()
-        else:
-            raise Exception(f"not support: {model_type}")
+        # AM predictor
+        self.init_model()
 
         logger.info(f"create the {model_type} model success")
         return True
@@ -834,7 +845,20 @@ class ASREngine(BaseEngine):
 
     def __init__(self):
        super(ASREngine, self).__init__()
-        logger.info("create the online asr engine resource instance")
+
+    def init_model(self) -> bool:
+        if not self.executor._init_from_path(
+                model_type=self.config.model_type,
+                am_model=self.config.am_model,
+                am_params=self.config.am_params,
+                lang=self.config.lang,
+                sample_rate=self.config.sample_rate,
+                cfg_path=self.config.cfg_path,
+                decode_method=self.config.decode_method,
+                num_decoding_left_chunks=self.config.num_decoding_left_chunks,
+                am_predictor_conf=self.config.am_predictor_conf):
+            return False
+        return True
 
     def init(self, config: dict) -> bool:
         """init engine resource
@@ -861,16 +885,7 @@
 
         logger.info(f"paddlespeech_server set the device: {self.device}")
 
-        if not self.executor._init_from_path(
-                model_type=self.config.model_type,
-                am_model=self.config.am_model,
-                am_params=self.config.am_params,
-                lang=self.config.lang,
-                sample_rate=self.config.sample_rate,
-                cfg_path=self.config.cfg_path,
-                decode_method=self.config.decode_method,
-                num_decoding_left_chunks=self.config.num_decoding_left_chunks,
-                am_predictor_conf=self.config.am_predictor_conf):
+        if not self.init_model():
             logger.error(
                 "Init the ASR server occurs error, please check the server configuration yaml"
             )
diff --git a/paddlespeech/server/engine/asr/paddleinference/asr_engine.py b/paddlespeech/server/engine/asr/paddleinference/asr_engine.py
index 1a3b4620a6de8ee829216e5b1f78b0682d5aa83d..572004eb8a7b707563ebceaefe58b98e68cfd12f 100644
--- a/paddlespeech/server/engine/asr/paddleinference/asr_engine.py
+++ b/paddlespeech/server/engine/asr/paddleinference/asr_engine.py
@@ -21,7 +21,6 @@ from yacs.config import CfgNode
 
 from paddlespeech.cli.asr.infer import ASRExecutor
 from paddlespeech.cli.log import logger
-from paddlespeech.cli.utils import MODEL_HOME
 from paddlespeech.resource import CommonTaskResource
 from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
 from paddlespeech.s2t.modules.ctc import CTCDecoder
@@ -29,6 +28,7 @@ from paddlespeech.s2t.utils.utility import UpdateConfig
 from paddlespeech.server.engine.base_engine import BaseEngine
 from paddlespeech.server.utils.paddle_predictor import init_predictor
 from paddlespeech.server.utils.paddle_predictor import run_model
+from paddlespeech.utils.env import MODEL_HOME
 
 __all__ = ['ASREngine', 'PaddleASRConnectionHandler']
diff --git a/paddlespeech/server/engine/engine_factory.py b/paddlespeech/server/engine/engine_factory.py
index 5fdaacceaca3f9d0f0a84ddb82ad7a9426a219ab..6a66a002e4a4986e9000f9d841225e1be0cbfe82 100644
--- a/paddlespeech/server/engine/engine_factory.py
+++ b/paddlespeech/server/engine/engine_factory.py
@@ -13,12 +13,16 @@
 # limitations under the License.
from typing import Text +from ..utils.log import logger + __all__ = ['EngineFactory'] class EngineFactory(object): @staticmethod def get_engine(engine_name: Text, engine_type: Text): + logger.info(f"{engine_name} : {engine_type} engine.") + if engine_name == 'asr' and engine_type == 'inference': from paddlespeech.server.engine.asr.paddleinference.asr_engine import ASREngine return ASREngine() @@ -26,7 +30,13 @@ class EngineFactory(object): from paddlespeech.server.engine.asr.python.asr_engine import ASREngine return ASREngine() elif engine_name == 'asr' and engine_type == 'online': - from paddlespeech.server.engine.asr.online.asr_engine import ASREngine + from paddlespeech.server.engine.asr.online.python.asr_engine import ASREngine + return ASREngine() + elif engine_name == 'asr' and engine_type == 'online-inference': + from paddlespeech.server.engine.asr.online.paddleinference.asr_engine import ASREngine + return ASREngine() + elif engine_name == 'asr' and engine_type == 'online-onnx': + from paddlespeech.server.engine.asr.online.onnx.asr_engine import ASREngine return ASREngine() elif engine_name == 'tts' and engine_type == 'inference': from paddlespeech.server.engine.tts.paddleinference.tts_engine import TTSEngine diff --git a/paddlespeech/server/utils/onnx_infer.py b/paddlespeech/server/utils/onnx_infer.py index ac11c534b9ab85f9a70446d1bbff371ea2a2eff3..1c9d878f83f96d1d7ad44796eabe49ef9160078e 100644 --- a/paddlespeech/server/utils/onnx_infer.py +++ b/paddlespeech/server/utils/onnx_infer.py @@ -16,21 +16,34 @@ from typing import Optional import onnxruntime as ort +from .log import logger + def get_sess(model_path: Optional[os.PathLike]=None, sess_conf: dict=None): + logger.info(f"ort sessconf: {sess_conf}") sess_options = ort.SessionOptions() sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL + if sess_conf.get('graph_optimization_level', 99) == 0: + sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL - if "gpu" in sess_conf["device"]: + # "gpu:0" + providers = ['CPUExecutionProvider'] + if "gpu" in sess_conf.get("device", ""): + providers = ['CUDAExecutionProvider'] # fastspeech2/mb_melgan can't use trt now! 
- if sess_conf["use_trt"]: + if sess_conf.get("use_trt", 0): providers = ['TensorrtExecutionProvider'] - else: - providers = ['CUDAExecutionProvider'] - elif sess_conf["device"] == "cpu": - providers = ['CPUExecutionProvider'] - sess_options.intra_op_num_threads = sess_conf["cpu_threads"] + logger.info(f"ort providers: {providers}") + + if 'cpu_threads' in sess_conf: + sess_options.intra_op_num_threads = sess_conf.get("cpu_threads", 0) + else: + sess_options.intra_op_num_threads = sess_conf.get( + "intra_op_num_threads", 0) + + sess_options.inter_op_num_threads = sess_conf.get("inter_op_num_threads", 0) + sess = ort.InferenceSession( model_path, providers=providers, sess_options=sess_options) return sess diff --git a/paddlespeech/t2s/exps/fastspeech2/normalize.py b/paddlespeech/t2s/exps/fastspeech2/normalize.py index 8ec20ebf0f8f1865c45cdeed99d487e079e498b0..92d10832b731856e885b11ebf038dd91da42bd9c 100644 --- a/paddlespeech/t2s/exps/fastspeech2/normalize.py +++ b/paddlespeech/t2s/exps/fastspeech2/normalize.py @@ -58,30 +58,8 @@ def main(): "--phones-dict", type=str, default=None, help="phone vocabulary file.") parser.add_argument( "--speaker-dict", type=str, default=None, help="speaker id map file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. (default=1)") - args = parser.parse_args() - # set logger - if args.verbose > 1: - logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - elif args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - logging.warning('Skip DEBUG/INFO messages') + args = parser.parse_args() dumpdir = Path(args.dumpdir).expanduser() # use absolute path diff --git a/paddlespeech/t2s/exps/fastspeech2/preprocess.py b/paddlespeech/t2s/exps/fastspeech2/preprocess.py index eac75f9821dd69b798a097687a1101b8d717dc9c..0045c5a3319f2eeba7956b57d64d64cb6569d181 100644 --- a/paddlespeech/t2s/exps/fastspeech2/preprocess.py +++ b/paddlespeech/t2s/exps/fastspeech2/preprocess.py @@ -209,11 +209,6 @@ def main(): parser.add_argument("--config", type=str, help="fastspeech2 config file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. (default=1)") parser.add_argument( "--num-cpu", type=int, default=1, help="number of process.") @@ -248,10 +243,6 @@ def main(): with open(args.config, 'rt') as f: config = CfgNode(yaml.safe_load(f)) - if args.verbose > 1: - print(vars(args)) - print(config) - sentences, speaker_set = get_phn_dur(dur_file) merge_silence(sentences) diff --git a/paddlespeech/t2s/exps/gan_vocoder/normalize.py b/paddlespeech/t2s/exps/gan_vocoder/normalize.py index ba95d3ed61e341ebc458846a1f79099066c2cc7a..4cb7e41c576935e6d349eba7efe19914babded4d 100644 --- a/paddlespeech/t2s/exps/gan_vocoder/normalize.py +++ b/paddlespeech/t2s/exps/gan_vocoder/normalize.py @@ -47,30 +47,8 @@ def main(): default=False, action="store_true", help="whether to skip the copy of wav files.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. 
(default=1)") - args = parser.parse_args() - # set logger - if args.verbose > 1: - logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - elif args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - logging.warning('Skip DEBUG/INFO messages') + args = parser.parse_args() dumpdir = Path(args.dumpdir).expanduser() # use absolute path diff --git a/paddlespeech/t2s/exps/gan_vocoder/preprocess.py b/paddlespeech/t2s/exps/gan_vocoder/preprocess.py index 546367964f98205318b1ad089604d2518472506e..05c6576829cff710c50235015a07c66781d381e4 100644 --- a/paddlespeech/t2s/exps/gan_vocoder/preprocess.py +++ b/paddlespeech/t2s/exps/gan_vocoder/preprocess.py @@ -167,11 +167,6 @@ def main(): required=True, help="directory to dump feature files.") parser.add_argument("--config", type=str, help="vocoder config file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. (default=1)") parser.add_argument( "--num-cpu", type=int, default=1, help="number of process.") parser.add_argument( @@ -197,10 +192,6 @@ def main(): with open(args.config, 'rt') as f: config = CfgNode(yaml.safe_load(f)) - if args.verbose > 1: - print(vars(args)) - print(config) - sentences, speaker_set = get_phn_dur(dur_file) merge_silence(sentences) diff --git a/paddlespeech/t2s/exps/speedyspeech/normalize.py b/paddlespeech/t2s/exps/speedyspeech/normalize.py index 249a4d6d83e59c933994a1532d0e836a0a8679c3..f29466f655ee7033654e8366095831cff0a18657 100644 --- a/paddlespeech/t2s/exps/speedyspeech/normalize.py +++ b/paddlespeech/t2s/exps/speedyspeech/normalize.py @@ -50,11 +50,6 @@ def main(): "--tones-dict", type=str, default=None, help="tone vocabulary file.") parser.add_argument( "--speaker-dict", type=str, default=None, help="speaker id map file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. (default=1)") parser.add_argument( "--use-relative-path", @@ -63,24 +58,6 @@ def main(): help="whether use relative path in metadata") args = parser.parse_args() - # set logger - if args.verbose > 1: - logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - elif args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - logging.warning('Skip DEBUG/INFO messages') - dumpdir = Path(args.dumpdir).expanduser() # use absolute path dumpdir = dumpdir.resolve() diff --git a/paddlespeech/t2s/exps/speedyspeech/preprocess.py b/paddlespeech/t2s/exps/speedyspeech/preprocess.py index aa7608d6b945b7fda3bdfab9ab74c1c080b20537..e4084c142f6cf00791930c6de1a11078d32a26c0 100644 --- a/paddlespeech/t2s/exps/speedyspeech/preprocess.py +++ b/paddlespeech/t2s/exps/speedyspeech/preprocess.py @@ -195,11 +195,6 @@ def main(): parser.add_argument("--config", type=str, help="fastspeech2 config file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. 
(default=1)") parser.add_argument( "--num-cpu", type=int, default=1, help="number of process.") @@ -230,10 +225,6 @@ def main(): with open(args.config, 'rt') as f: config = CfgNode(yaml.safe_load(f)) - if args.verbose > 1: - print(vars(args)) - print(config) - sentences, speaker_set = get_phn_dur(dur_file) merge_silence(sentences) diff --git a/paddlespeech/t2s/exps/tacotron2/preprocess.py b/paddlespeech/t2s/exps/tacotron2/preprocess.py index 6137da7f175b4e23af7a4b2e60908527bb65978d..c27b9769b7daa0de8857a27129e4f52dafee717f 100644 --- a/paddlespeech/t2s/exps/tacotron2/preprocess.py +++ b/paddlespeech/t2s/exps/tacotron2/preprocess.py @@ -184,11 +184,6 @@ def main(): parser.add_argument("--config", type=str, help="fastspeech2 config file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. (default=1)") parser.add_argument( "--num-cpu", type=int, default=1, help="number of process.") @@ -223,10 +218,6 @@ def main(): with open(args.config, 'rt') as f: config = CfgNode(yaml.safe_load(f)) - if args.verbose > 1: - print(vars(args)) - print(config) - sentences, speaker_set = get_phn_dur(dur_file) merge_silence(sentences) diff --git a/paddlespeech/t2s/exps/transformer_tts/normalize.py b/paddlespeech/t2s/exps/transformer_tts/normalize.py index 87e975b88ffb1b27c63885dfbe7fdb3c4cf5b718..e5f052c60dbbb84da35731a0eefb4a0c721b06be 100644 --- a/paddlespeech/t2s/exps/transformer_tts/normalize.py +++ b/paddlespeech/t2s/exps/transformer_tts/normalize.py @@ -51,30 +51,8 @@ def main(): "--phones-dict", type=str, default=None, help="phone vocabulary file.") parser.add_argument( "--speaker-dict", type=str, default=None, help="speaker id map file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. (default=1)") - args = parser.parse_args() - # set logger - if args.verbose > 1: - logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - elif args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - logging.warning('Skip DEBUG/INFO messages') + args = parser.parse_args() # check directory existence dumpdir = Path(args.dumpdir).resolve() diff --git a/paddlespeech/t2s/exps/transformer_tts/preprocess.py b/paddlespeech/t2s/exps/transformer_tts/preprocess.py index 28ca3de6eb455a3832029eb44e55e804b0fd8826..2ebd5ecc2fdbc0ebd69203b779b71809e9fad8c9 100644 --- a/paddlespeech/t2s/exps/transformer_tts/preprocess.py +++ b/paddlespeech/t2s/exps/transformer_tts/preprocess.py @@ -186,11 +186,6 @@ def main(): type=str, help="yaml format configuration file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. 
(default=1)") parser.add_argument( "--num-cpu", type=int, default=1, help="number of process.") @@ -210,10 +205,6 @@ def main(): _C = Configuration(_C) config = _C.clone() - if args.verbose > 1: - print(vars(args)) - print(config) - phone_id_map_path = dumpdir / "phone_id_map.txt" speaker_id_map_path = dumpdir / "speaker_id_map.txt" diff --git a/paddlespeech/t2s/exps/vits/normalize.py b/paddlespeech/t2s/exps/vits/normalize.py index 6fc8adb061a94acbaee6d960c87b9c1a5de41644..5881ae95c071255a583bda87869dfafd9cac2809 100644 --- a/paddlespeech/t2s/exps/vits/normalize.py +++ b/paddlespeech/t2s/exps/vits/normalize.py @@ -16,6 +16,7 @@ import argparse import logging from operator import itemgetter from pathlib import Path +from typing import List import jsonlines import numpy as np @@ -23,6 +24,50 @@ from sklearn.preprocessing import StandardScaler from tqdm import tqdm from paddlespeech.t2s.datasets.data_table import DataTable +from paddlespeech.t2s.utils import str2bool + +INITIALS = [ + 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'zh', 'ch', 'sh', + 'r', 'z', 'c', 's', 'j', 'q', 'x' +] +INITIALS += ['y', 'w', 'sp', 'spl', 'spn', 'sil'] + + +def intersperse(lst, item): + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + +def insert_after_character(lst, item): + result = [item] + for phone in lst: + result.append(phone) + if phone not in INITIALS: + # finals has tones + assert phone[-1] in "12345" + result.append(item) + return result + + +def add_blank(phones: List[str], + filed: str="character", + blank_token: str=""): + if filed == "phone": + """ + add blank after phones + input: ["n", "i3", "h", "ao3", "m", "a5"] + output: ["n", "", "i3", "", "h", "", "ao3", "", "m", "", "a5"] + """ + phones = intersperse(phones, blank_token) + elif filed == "character": + """ + add blank after characters + input: ["n", "i3", "h", "ao3"] + output: ["n", "i3", "", "h", "ao3", "", "m", "a5"] + """ + phones = insert_after_character(phones, blank_token) + return phones def main(): @@ -58,29 +103,12 @@ def main(): parser.add_argument( "--speaker-dict", type=str, default=None, help="speaker id map file.") parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. 
(default=1)") - args = parser.parse_args() + "--add-blank", + type=str2bool, + default=True, + help="whether to add blank between phones") - # set logger - if args.verbose > 1: - logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - elif args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - ) - logging.warning('Skip DEBUG/INFO messages') + args = parser.parse_args() dumpdir = Path(args.dumpdir).expanduser() # use absolute path @@ -135,13 +163,19 @@ def main(): else: wav_path = wave - phone_ids = [vocab_phones[p] for p in item['phones']] + phones = item['phones'] + text_lengths = item['text_lengths'] + if args.add_blank: + phones = add_blank(phones, filed="character") + text_lengths = len(phones) + + phone_ids = [vocab_phones[p] for p in phones] spk_id = vocab_speaker[item["speaker"]] record = { "utt_id": item['utt_id'], "text": phone_ids, - "text_lengths": item['text_lengths'], + "text_lengths": text_lengths, 'feats': str(feats_path), "feats_lengths": item['feats_lengths'], "wave": str(wav_path), diff --git a/paddlespeech/t2s/exps/vits/preprocess.py b/paddlespeech/t2s/exps/vits/preprocess.py index 6aa139fb5f8a32f965bd7a2b781500a011b344ff..f89ab356f1a3cf43747e80d51f13a60bbd9b3445 100644 --- a/paddlespeech/t2s/exps/vits/preprocess.py +++ b/paddlespeech/t2s/exps/vits/preprocess.py @@ -197,11 +197,6 @@ def main(): parser.add_argument("--config", type=str, help="fastspeech2 config file.") - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. 
(default=1)") parser.add_argument( "--num-cpu", type=int, default=1, help="number of process.") @@ -236,10 +231,6 @@ def main(): with open(args.config, 'rt') as f: config = CfgNode(yaml.safe_load(f)) - if args.verbose > 1: - print(vars(args)) - print(config) - sentences, speaker_set = get_phn_dur(dur_file) merge_silence(sentences) diff --git a/paddlespeech/t2s/exps/vits/synthesize_e2e.py b/paddlespeech/t2s/exps/vits/synthesize_e2e.py index c82e5c03941288eee19d1b8f063105288daa8a12..33a4137519ace905733db815955f834a199e17cf 100644 --- a/paddlespeech/t2s/exps/vits/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/vits/synthesize_e2e.py @@ -23,6 +23,7 @@ from yacs.config import CfgNode from paddlespeech.t2s.exps.syn_utils import get_frontend from paddlespeech.t2s.exps.syn_utils import get_sentences from paddlespeech.t2s.models.vits import VITS +from paddlespeech.t2s.utils import str2bool def evaluate(args): @@ -55,6 +56,7 @@ def evaluate(args): output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) merge_sentences = False + add_blank = args.add_blank N = 0 T = 0 @@ -62,7 +64,9 @@ def evaluate(args): with timer() as t: if args.lang == 'zh': input_ids = frontend.get_input_ids( - sentence, merge_sentences=merge_sentences) + sentence, + merge_sentences=merge_sentences, + add_blank=add_blank) phone_ids = input_ids["phone_ids"] elif args.lang == 'en': input_ids = frontend.get_input_ids( @@ -125,6 +129,12 @@ def parse_args(): help="text to synthesize, a 'utt_id sentence' pair per line.") parser.add_argument("--output_dir", type=str, help="output dir.") + parser.add_argument( + "--add-blank", + type=str2bool, + default=True, + help="whether to add blank between phones") + args = parser.parse_args() return args diff --git a/paddlespeech/t2s/exps/vits/train.py b/paddlespeech/t2s/exps/vits/train.py index dbda8b7177bca068ecaeabe41679a93e153aba35..1a68d13269ab1ed54a176106f3f31edd8fab3e97 100644 --- a/paddlespeech/t2s/exps/vits/train.py +++ b/paddlespeech/t2s/exps/vits/train.py @@ -211,13 +211,18 @@ def train_sp(args, config): generator_first=config.generator_first, output_dir=output_dir) - trainer = Trainer(updater, (config.max_epoch, 'epoch'), output_dir) + trainer = Trainer( + updater, + stop_trigger=(config.train_max_steps, "iteration"), + out=output_dir) if dist.get_rank() == 0: - trainer.extend(evaluator, trigger=(1, "epoch")) - trainer.extend(VisualDL(output_dir), trigger=(1, "iteration")) + trainer.extend( + evaluator, trigger=(config.eval_interval_steps, 'iteration')) + trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration')) trainer.extend( - Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch')) + Snapshot(max_size=config.num_snapshots), + trigger=(config.save_interval_steps, 'iteration')) print("Trainer Done!") trainer.run() diff --git a/paddlespeech/t2s/exps/waveflow/preprocess.py b/paddlespeech/t2s/exps/waveflow/preprocess.py index ef3a29175896d7d02f7a9df4dcc930d33f9476af..c7034aeabf8987441749956a309771f67040dfc9 100644 --- a/paddlespeech/t2s/exps/waveflow/preprocess.py +++ b/paddlespeech/t2s/exps/waveflow/preprocess.py @@ -143,8 +143,6 @@ if __name__ == "__main__": nargs=argparse.REMAINDER, help="options to overwrite --config file and the default config, passing in KEY VALUE pairs" ) - parser.add_argument( - "-v", "--verbose", action="store_true", help="print msg") config = get_cfg_defaults() args = parser.parse_args() @@ -153,8 +151,5 @@ if __name__ == "__main__": if args.opts: config.merge_from_list(args.opts) config.freeze() - if args.verbose: - 
print(config.data)
-        print(args)
 
     create_dataset(config.data, args.input, args.output)
diff --git a/paddlespeech/t2s/exps/waveflow/synthesize.py b/paddlespeech/t2s/exps/waveflow/synthesize.py
index 53715b01ea0f89fd7cf19f18c4643e07f28d0422..a3190c6e52c12e98b9e2873a0859ac3dc221a459 100644
--- a/paddlespeech/t2s/exps/waveflow/synthesize.py
+++ b/paddlespeech/t2s/exps/waveflow/synthesize.py
@@ -72,8 +72,6 @@
         nargs=argparse.REMAINDER,
         help="options to overwrite --config file and the default config, passing in KEY VALUE pairs"
     )
-    parser.add_argument(
-        "-v", "--verbose", action="store_true", help="print msg")
 
     args = parser.parse_args()
     if args.config:
diff --git a/paddlespeech/t2s/frontend/zh_frontend.py b/paddlespeech/t2s/frontend/zh_frontend.py
index 129aa944ed3ea1c7bb52a400101cf88c34be4578..143ccbc15d44623acdac8f5a0810b480af5d614a 100644
--- a/paddlespeech/t2s/frontend/zh_frontend.py
+++ b/paddlespeech/t2s/frontend/zh_frontend.py
@@ -29,6 +29,29 @@ from paddlespeech.t2s.frontend.generate_lexicon import generate_lexicon
 from paddlespeech.t2s.frontend.tone_sandhi import ToneSandhi
 from paddlespeech.t2s.frontend.zh_normalization.text_normlization import TextNormalizer
 
+INITIALS = [
+    'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'zh', 'ch', 'sh',
+    'r', 'z', 'c', 's', 'j', 'q', 'x'
+]
+INITIALS += ['y', 'w', 'sp', 'spl', 'spn', 'sil']
+
+
+def intersperse(lst, item):
+    result = [item] * (len(lst) * 2 + 1)
+    result[1::2] = lst
+    return result
+
+
+def insert_after_character(lst, item):
+    result = [item]
+    for phone in lst:
+        result.append(phone)
+        if phone not in INITIALS:
+            # finals have tones
+            # assert phone[-1] in "12345"
+            result.append(item)
+    return result
+
 
 class Frontend():
     def __init__(self,
@@ -280,12 +303,15 @@
             print("----------------------------")
         return phonemes
 
-    def get_input_ids(self,
-                      sentence: str,
-                      merge_sentences: bool=True,
-                      get_tone_ids: bool=False,
-                      robot: bool=False,
-                      print_info: bool=False) -> Dict[str, List[paddle.Tensor]]:
+    def get_input_ids(
+            self,
+            sentence: str,
+            merge_sentences: bool=True,
+            get_tone_ids: bool=False,
+            robot: bool=False,
+            print_info: bool=False,
+            add_blank: bool=False,
+            blank_token: str="<pad>") -> Dict[str, List[paddle.Tensor]]:
         phonemes = self.get_phonemes(
             sentence,
             merge_sentences=merge_sentences,
@@ -299,6 +325,10 @@
         for part_phonemes in phonemes:
             phones, tones = self._get_phone_tone(
                 part_phonemes, get_tone_ids=get_tone_ids)
+
+            if add_blank:
+                phones = insert_after_character(phones, blank_token)
+
             if tones:
                 tone_ids = self._t2id(tones)
                 tone_ids = paddle.to_tensor(tone_ids)
diff --git a/paddlespeech/t2s/models/vits/vits.py b/paddlespeech/t2s/models/vits/vits.py
index ab8eda26d0b9b2118fa2b06b6f9ea546abb74873..5c476be77d747dc2fb2a6879640fed1258c6bcc8 100644
--- a/paddlespeech/t2s/models/vits/vits.py
+++ b/paddlespeech/t2s/models/vits/vits.py
@@ -227,11 +227,7 @@ class VITS(nn.Layer):
             lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
             forward_generator (bool): Whether to forward generator.
         Returns:
-            Dict[str, Any]:
-                - loss (Tensor): Loss scalar tensor.
-                - stats (Dict[str, float]): Statistics to be monitored.
-                - weight (Tensor): Weight tensor to summarize losses.
-                - optim_idx (int): Optimizer index (0 for G and 1 for D).
+
         """
         if forward_generator:
             return self._forward_generator(
diff --git a/paddlespeech/utils/env.py b/paddlespeech/utils/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..03c8757bc2b16d810962ac862a743570d729fb19
--- /dev/null
+++ b/paddlespeech/utils/env.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+
+
+def _get_user_home():
+    return os.path.expanduser('~')
+
+
+def _get_paddlespeech_home():
+    if 'PPSPEECH_HOME' in os.environ:
+        home_path = os.environ['PPSPEECH_HOME']
+        if os.path.exists(home_path):
+            if os.path.isdir(home_path):
+                return home_path
+            else:
+                raise RuntimeError(
+                    'The environment variable PPSPEECH_HOME {} is not a directory.'.
+                    format(home_path))
+        else:
+            return home_path
+    return os.path.join(_get_user_home(), '.paddlespeech')
+
+
+def _get_sub_home(directory):
+    home = os.path.join(_get_paddlespeech_home(), directory)
+    if not os.path.exists(home):
+        os.makedirs(home)
+    return home
+
+
+PPSPEECH_HOME = _get_paddlespeech_home()
+MODEL_HOME = _get_sub_home('models')
+CONF_HOME = _get_sub_home('conf')
+DATA_HOME = _get_sub_home('datasets')
diff --git a/speechx/examples/ds2_ol/onnx/README.md b/speechx/examples/ds2_ol/onnx/README.md
index 566a4597d805118ed5753be71b2ac96ccb2548ec..eaea8b6e8cc43c3435d84ad07d16c9658218634e 100644
--- a/speechx/examples/ds2_ol/onnx/README.md
+++ b/speechx/examples/ds2_ol/onnx/README.md
@@ -9,7 +9,7 @@ Please make sure [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) and
 The example test with these packages installed:
 ```
-paddle2onnx              0.9.8rc0    # develop af4354b4e9a61a93be6490640059a02a4499bc7a
+paddle2onnx              0.9.8       # develop 62c5424e22cd93968dc831216fc9e0f0fce3d819
 paddleaudio              0.2.1
 paddlefsl                1.1.0
 paddlenlp                2.2.6
diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py
index c41e66b725b8a288cb7fda856f0fd448fd16c304..2d364c25294f8cb53766b3ed49ed84f4928ab161 100755
--- a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py
+++ b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py
@@ -492,6 +492,8 @@ class SymbolicShapeInference:
         skip_infer = node.op_type in [
             'If', 'Loop', 'Scan', 'SplitToSequence', 'ZipMap', \ # contrib ops
+
+
             'Attention', 'BiasGelu', \
             'EmbedLayerNormalization', \
             'FastGelu', 'Gelu', 'LayerNormalization', \
diff --git a/speechx/examples/ds2_ol/onnx/local/ort_opt.py b/speechx/examples/ds2_ol/onnx/local/ort_opt.py
new file mode 100755
index 0000000000000000000000000000000000000000..8e995bcf0a70585d413d35c41cc1e286de158a8b
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/ort_opt.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+import argparse
+
+import onnxruntime as ort
+
+# onnxruntime optimizer.
+# https://onnxruntime.ai/docs/performance/graph-optimizations.html
+# https://onnxruntime.ai/docs/api/python/api_summary.html#api
+
+
+def parse_arguments():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--model_in', required=True, type=str, help='Path to onnx model.')
+    parser.add_argument(
+        '--opt_level',
+        required=True,
+        type=int,
+        default=0,
+        choices=[0, 1, 2],
+        help='Graph optimization level: 0 basic, 1 extended, 2 all.')
+    parser.add_argument(
+        '--model_out', required=True, help='path to save the optimized model.')
+    parser.add_argument('--debug', default=False, help='output debug info.')
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parse_arguments()
+
+    sess_options = ort.SessionOptions()
+
+    # Set graph optimization level
+    print(f"opt level: {args.opt_level}")
+    if args.opt_level == 0:
+        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
+    elif args.opt_level == 1:
+        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
+    else:
+        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+    # To enable model serialization after graph optimization set this
+    sess_options.optimized_model_filepath = args.model_out
+
+    session = ort.InferenceSession(args.model_in, sess_options)
diff --git a/speechx/examples/ds2_ol/onnx/run.sh b/speechx/examples/ds2_ol/onnx/run.sh
index 57cd9416790f3e66383d3bc5b7e7ef4cf3906d8e..583abda4eee78073f677c33083fd50d67a42fb42 100755
--- a/speechx/examples/ds2_ol/onnx/run.sh
+++ b/speechx/examples/ds2_ol/onnx/run.sh
@@ -5,10 +5,11 @@ set -e
 . path.sh
 
 stage=0
-stop_stage=100
-#tarfile=asr0_deepspeech2_online_wenetspeech_ckpt_1.0.2.model.tar.gz
-tarfile=asr0_deepspeech2_online_aishell_fbank161_ckpt_1.0.1.model.tar.gz
-model_prefix=avg_1.jit
+stop_stage=50
+tarfile=asr0_deepspeech2_online_wenetspeech_ckpt_1.0.2.model.tar.gz
+#tarfile=asr0_deepspeech2_online_aishell_fbank161_ckpt_1.0.1.model.tar.gz
+model_prefix=avg_10.jit
+#model_prefix=avg_1.jit
 
 model=${model_prefix}.pdmodel
 param=${model_prefix}.pdiparams
@@ -80,6 +81,14 @@
 fi
 
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ] ;then
+    # ort graph optimize
+    ./local/ort_opt.py --model_in $exp/model.onnx --opt_level 0 --model_out $exp/model.ort.opt.onnx
+
+    ./local/infer_check.py --input_file $input_file --model_type $model_type --model_dir $dir --model_prefix $model_prefix --onnx_model $exp/model.ort.opt.onnx
+fi
+
+
 # aishell rnn hidden is 1024
 # wenetspeech rnn hiddn is 2048
 if [ $model_type == 'aishell' ];then
@@ -90,9 +99,9 @@
     echo "not support: $model_type"
     exit -1
 fi
-
-if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ] ;then
+
+if [ ${stage} -le 51 ] && [ ${stop_stage} -ge 51 ] ;then
     # wenetspeech ds2 model execed 2GB limit, will error.
     # simplifying onnx model
     ./local/onnx_opt.sh $exp/model.onnx $exp/model.opt.onnx "$input_shape"
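Stage 4 above writes the ORT-optimized graph to `$exp/model.ort.opt.onnx`. As a quick sanity check, a sketch that is not part of the patch, the optimized model can be loaded back with onnxruntime to inspect its input and output signature; the path below is a placeholder mirroring the `$exp` layout in `run.sh`:

```python
# Illustrative sanity check: reload the optimized model written by
# local/ort_opt.py and print its graph inputs/outputs.
import onnxruntime as ort

sess = ort.InferenceSession(
    'exp/model.ort.opt.onnx', providers=['CPUExecutionProvider'])
print('inputs :', [(i.name, i.shape) for i in sess.get_inputs()])
print('outputs:', [(o.name, o.shape) for o in sess.get_outputs()])
```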