diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py
index cde7fe4c4f57e061c442d2b81ec428dd9e529c4f..f0a6ef31a93720f7d2ae9617d3ad053cdbc87f73 100644
--- a/paddlespeech/resource/pretrained_models.py
+++ b/paddlespeech/resource/pretrained_models.py
@@ -175,7 +175,7 @@ asr_dynamic_pretrained_models = {
         'ckpt_path':'exp/deepspeech2_online/checkpoints/avg_1',
         'model':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel',
         'params':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams',
-        'onnx_model': 'onnx/model.onnx'
+        'onnx_model': 'onnx/model.onnx',
         'lm_url':'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm',
         'lm_md5':'29e02312deb2e59b3c8686c7966d4fe3'
     },
@@ -228,7 +228,7 @@ asr_static_pretrained_models = {
         'ckpt_path':'exp/deepspeech2_online/checkpoints/avg_1',
         'model':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel',
         'params':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams',
-        'onnx_model': 'onnx/model.onnx'
+        'onnx_model': 'onnx/model.onnx',
         'lm_url':'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm',
         'lm_md5':'29e02312deb2e59b3c8686c7966d4fe3'
     },
@@ -246,7 +246,7 @@ asr_onnx_pretrained_models = {
         'ckpt_path':'exp/deepspeech2_online/checkpoints/avg_1',
         'model':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdmodel',
         'params':'exp/deepspeech2_online/checkpoints/avg_1.jit.pdiparams',
-        'onnx_model': 'onnx/model.onnx'
+        'onnx_model': 'onnx/model.onnx',
         'lm_url':'https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm',
         'lm_md5':'29e02312deb2e59b3c8686c7966d4fe3'
     },
diff --git a/speechx/examples/ds2_ol/onnx/local/infer_check.py b/speechx/examples/ds2_ol/onnx/local/infer_check.py
index a5ec7ce347715da081516f19d81c374f321189a5..f821baa12e73a2ed8a85e468b98ed6503bcf64b8 100755
--- a/speechx/examples/ds2_ol/onnx/local/infer_check.py
+++ b/speechx/examples/ds2_ol/onnx/local/infer_check.py
@@ -27,7 +27,8 @@ def parse_args():
         '--input_file',
         type=str,
         default="static_ds2online_inputs.pickle",
-        help="aishell ds2 input data file. For wenetspeech, we only feed for infer model", )
+        help="aishell ds2 input data file. For wenetspeech, we only feed for infer model",
+    )
     parser.add_argument(
         '--model_type',
         type=str,
@@ -57,7 +58,6 @@ if __name__ == '__main__':
         iodict = pickle.load(f)
         print(iodict.keys())
 
-    audio_chunk = iodict['audio_chunk']
     audio_chunk_lens = iodict['audio_chunk_lens']
     chunk_state_h_box = iodict['chunk_state_h_box']
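
Note on the trailing commas added in pretrained_models.py: without a comma after 'onnx/model.onnx', Python's implicit string-literal concatenation merges the value with the following 'lm_url' key string, and the next ':' then fails to parse. A minimal sketch (hypothetical dict, not from the repo) reproducing the failure:

import ast

# Without the comma, 'onnx/model.onnx' and 'lm_url' are concatenated into
# a single string literal, so the ':' that follows is a SyntaxError.
broken = """
entry = {
    'onnx_model': 'onnx/model.onnx'
    'lm_url': 'https://example.com/lm.klm',
}
"""
try:
    ast.parse(broken)
except SyntaxError as err:
    print('SyntaxError:', err.msg)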